author		Daniel Vetter <daniel.vetter@ffwll.ch>	2017-02-26 15:34:42 -0500
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2017-02-26 15:34:42 -0500
commit		8e22e1b3499a446df48c2b26667ca36c55bf864c (patch)
tree		5329f98b3eb3c95a9dcbab0fa4f9b6e62f0e788d /drivers/gpu/drm
parent		00d3c14f14d51babd8aeafd5fa734ccf04f5ca3d (diff)
parent		64a577196d66b44e37384bc5c4d78c61f59d5b2a (diff)
Merge airlied/drm-next into drm-misc-next
Backmerge the main pull request to sync up with all the newly landed
drivers. Otherwise we'll have chaos even before 4.12 has started in
earnest.
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'drivers/gpu/drm')
341 files changed, 14884 insertions, 5064 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 90bc65d07a35..88e01e08e279 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -263,6 +263,8 @@ source "drivers/gpu/drm/mxsfb/Kconfig"
 
 source "drivers/gpu/drm/meson/Kconfig"
 
+source "drivers/gpu/drm/tinydrm/Kconfig"
+
 # Keep legacy drivers last
 
 menuconfig DRM_LEGACY
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 92de3991fa56..3ee95793d122 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -94,3 +94,4 @@ obj-$(CONFIG_DRM_ARCPGU)+= arc/
 obj-y			+= hisilicon/
 obj-$(CONFIG_DRM_ZTE)	+= zte/
 obj-$(CONFIG_DRM_MXSFB)	+= mxsfb/
+obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 94a64e3bc682..c1b913541739 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1037,7 +1037,6 @@ struct amdgpu_uvd {
 	bool			use_ctx_buf;
 	struct amd_sched_entity	entity;
 	uint32_t		srbm_soft_reset;
-	bool			is_powergated;
 };
 
 /*
@@ -1066,7 +1065,6 @@ struct amdgpu_vce {
 	struct amd_sched_entity	entity;
 	uint32_t		srbm_soft_reset;
 	unsigned		num_rings;
-	bool			is_powergated;
 };
 
 /*
@@ -1484,6 +1482,9 @@ struct amdgpu_device {
 	spinlock_t		gtt_list_lock;
 	struct list_head	gtt_list;
 
+	/* record hw reset is performed */
+	bool has_hw_reset;
+
 };
 
 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1702,13 +1703,14 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 int amdgpu_gpu_reset(struct amdgpu_device *adev);
 bool amdgpu_need_backup(struct amdgpu_device *adev);
 void amdgpu_pci_config_reset(struct amdgpu_device *adev);
-bool amdgpu_card_posted(struct amdgpu_device *adev);
+bool amdgpu_need_post(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
 
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 		       u32 ip_instance, u32 ring,
 		       struct amdgpu_ring **out_ring);
+void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes);
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index d9def01f276e..821f7cc2051f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -100,7 +100,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
 	resource_size_t size = 256 * 1024; /* ??? */
 
 	if (!(adev->flags & AMD_IS_APU))
-		if (!amdgpu_card_posted(adev))
+		if (amdgpu_need_post(adev))
 			return false;
 
 	adev->bios = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index a5df1ef306d9..d9e5aa4a79ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -834,32 +834,57 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 		case CHIP_TOPAZ:
 			if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
 			    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
-			    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)))
+			    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
+				info->is_kicker = true;
 				strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
-			else
+			} else
 				strcpy(fw_name, "amdgpu/topaz_smc.bin");
 			break;
 		case CHIP_TONGA:
 			if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
-			    ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1)))
+			    ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) {
+				info->is_kicker = true;
 				strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
-			else
+			} else
 				strcpy(fw_name, "amdgpu/tonga_smc.bin");
 			break;
 		case CHIP_FIJI:
 			strcpy(fw_name, "amdgpu/fiji_smc.bin");
 			break;
 		case CHIP_POLARIS11:
-			if (type == CGS_UCODE_ID_SMU)
-				strcpy(fw_name, "amdgpu/polaris11_smc.bin");
-			else if (type == CGS_UCODE_ID_SMU_SK)
+			if (type == CGS_UCODE_ID_SMU) {
+				if (((adev->pdev->device == 0x67ef) &&
+				     ((adev->pdev->revision == 0xe0) ||
+				      (adev->pdev->revision == 0xe2) ||
+				      (adev->pdev->revision == 0xe5))) ||
+				    ((adev->pdev->device == 0x67ff) &&
+				     ((adev->pdev->revision == 0xcf) ||
+				      (adev->pdev->revision == 0xef) ||
+				      (adev->pdev->revision == 0xff)))) {
+					info->is_kicker = true;
+					strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
+				} else
+					strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+			} else if (type == CGS_UCODE_ID_SMU_SK) {
 				strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
+			}
 			break;
 		case CHIP_POLARIS10:
-			if (type == CGS_UCODE_ID_SMU)
-				strcpy(fw_name, "amdgpu/polaris10_smc.bin");
-			else if (type == CGS_UCODE_ID_SMU_SK)
+			if (type == CGS_UCODE_ID_SMU) {
+				if ((adev->pdev->device == 0x67df) &&
+				    ((adev->pdev->revision == 0xe0) ||
+				     (adev->pdev->revision == 0xe3) ||
+				     (adev->pdev->revision == 0xe4) ||
+				     (adev->pdev->revision == 0xe5) ||
+				     (adev->pdev->revision == 0xe7) ||
+				     (adev->pdev->revision == 0xef))) {
+					info->is_kicker = true;
+					strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
+				} else
+					strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+			} else if (type == CGS_UCODE_ID_SMU_SK) {
 				strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
+			}
 			break;
 		case CHIP_POLARIS12:
 			strcpy(fw_name, "amdgpu/polaris12_smc.bin");
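The kicker detection above matches (device, revision) pairs in nested conditionals. As an illustrative alternative only (not what the driver does), the same check can be table-driven; the types and the kicker_ids table below are hypothetical:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct id_rev { uint16_t device; uint8_t revision; };

    /* hypothetical table mirroring the Topaz entries in the hunk above */
    static const struct id_rev kicker_ids[] = {
            { 0x6900, 0x81 }, { 0x6900, 0x83 }, { 0x6907, 0x87 },
    };

    static bool is_kicker(uint16_t device, uint8_t revision)
    {
            /* linear scan is fine for a handful of entries */
            for (size_t i = 0; i < sizeof(kicker_ids) / sizeof(kicker_ids[0]); i++)
                    if (kicker_ids[i].device == device &&
                        kicker_ids[i].revision == revision)
                            return true;
            return false;
    }

A table keeps the firmware-name selection readable as the list of kicker SKUs grows, at the cost of splitting the data away from the switch.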
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index cf2e8c4e9b8b..d2d0f60ff36d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -83,6 +83,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 		}
 		break;
 	}
+
+	if (!(*out_ring && (*out_ring)->adev)) {
+		DRM_ERROR("Ring %d is not initialized on IP %d\n",
+			  ring, ip_type);
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
@@ -344,8 +351,7 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
  * submission. This can result in a debt that can stop buffer migrations
  * temporarily.
  */
-static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
-					 u64 num_bytes)
+void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes)
 {
 	spin_lock(&adev->mm_stats.lock);
 	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
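The amdgpu_cs_get_ring() hunk adds a defensive check on the out-parameter before reporting success, so callers never receive a ring whose backing device was never brought up. A minimal user-space sketch of the same pattern, with hypothetical ring/device types rather than the kernel API:

    #include <stddef.h>

    struct device { int initialized; };
    struct ring   { struct device *dev; };  /* NULL dev = never initialized */

    /* Look up a ring by index and refuse to return success for an
     * uninitialized ring, instead of letting the caller crash later. */
    static int get_ring(struct ring *table, size_t n, size_t idx,
                        struct ring **out_ring)
    {
            if (idx >= n)
                    return -1;
            *out_ring = &table[idx];
            /* the check the patch adds: validate before returning 0 */
            if (!(*out_ring && (*out_ring)->dev))
                    return -1;
            return 0;
    }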
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 944ba0d3874a..6abb238b25c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -619,25 +619,29 @@ void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
  * GPU helpers function.
  */
 /**
- * amdgpu_card_posted - check if the hw has already been initialized
+ * amdgpu_need_post - check if the hw need post or not
  *
  * @adev: amdgpu_device pointer
  *
- * Check if the asic has been initialized (all asics).
- * Used at driver startup.
- * Returns true if initialized or false if not.
+ * Check if the asic has been initialized (all asics) at driver startup
+ * or post is needed if  hw reset is performed.
+ * Returns true if need or false if not.
  */
-bool amdgpu_card_posted(struct amdgpu_device *adev)
+bool amdgpu_need_post(struct amdgpu_device *adev)
 {
 	uint32_t reg;
 
+	if (adev->has_hw_reset) {
+		adev->has_hw_reset = false;
+		return true;
+	}
 	/* then check MEM_SIZE, in case the crtcs are off */
 	reg = RREG32(mmCONFIG_MEMSIZE);
 
 	if (reg)
-		return true;
+		return false;
 
-	return false;
+	return true;
 
 }
 
@@ -665,7 +669,7 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
 			return true;
 		}
 	}
-	return !amdgpu_card_posted(adev);
+	return amdgpu_need_post(adev);
 }
 
 /**
@@ -2071,7 +2075,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 	amdgpu_atombios_scratch_regs_restore(adev);
 
 	/* post card */
-	if (!amdgpu_card_posted(adev) || !resume) {
+	if (amdgpu_need_post(adev)) {
 		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
 		if (r)
 			DRM_ERROR("amdgpu asic init failed\n");
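Beyond the rename and inverted predicate, note that has_hw_reset is consumed on read: it answers "need post" exactly once after a reset, then falls back to the MEM_SIZE heuristic. A minimal sketch of that one-shot latch pattern, with illustrative types only:

    #include <stdbool.h>

    struct dev_state {
            bool has_hw_reset;      /* set by the reset path */
            unsigned int memsize;   /* stands in for the MEM_SIZE register */
    };

    /* Returns true exactly once after a hw reset was recorded, then
     * falls back to the register heuristic. */
    static bool need_post(struct dev_state *d)
    {
            if (d->has_hw_reset) {
                    d->has_hw_reset = false;   /* consume the latch */
                    return true;
            }
            /* non-zero MEM_SIZE means the vBIOS already posted the card */
            return d->memsize == 0;
    }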
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 9bd1b4eae32e..51d759463384 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -487,67 +487,44 @@ static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
  *
  * @adev: amdgpu_device pointer
  * @bo_va: bo_va to update
+ * @list: validation list
+ * @operation: map or unmap
  *
- * Update the bo_va directly after setting it's address. Errors are not
+ * Update the bo_va directly after setting its address. Errors are not
  * vital here, so they are not reported back to userspace.
  */
 static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 				    struct amdgpu_bo_va *bo_va,
+				    struct list_head *list,
 				    uint32_t operation)
 {
-	struct ttm_validate_buffer tv, *entry;
-	struct amdgpu_bo_list_entry vm_pd;
-	struct ww_acquire_ctx ticket;
-	struct list_head list, duplicates;
-	int r;
-
-	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&duplicates);
-
-	tv.bo = &bo_va->bo->tbo;
-	tv.shared = true;
-	list_add(&tv.head, &list);
-
-	amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);
+	struct ttm_validate_buffer *entry;
+	int r = -ERESTARTSYS;
 
-	/* Provide duplicates to avoid -EALREADY */
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
-	if (r)
-		goto error_print;
-
-	list_for_each_entry(entry, &list, head) {
+	list_for_each_entry(entry, list, head) {
 		struct amdgpu_bo *bo =
 			container_of(entry->bo, struct amdgpu_bo, tbo);
-
-		/* if anything is swapped out don't swap it in here,
-		   just abort and wait for the next CS */
-		if (!amdgpu_bo_gpu_accessible(bo))
-			goto error_unreserve;
-
-		if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
-			goto error_unreserve;
+		if (amdgpu_gem_va_check(NULL, bo))
+			goto error;
 	}
 
 	r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
 				      NULL);
 	if (r)
-		goto error_unreserve;
+		goto error;
 
 	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
 	if (r)
-		goto error_unreserve;
+		goto error;
 
 	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
 	if (r)
-		goto error_unreserve;
+		goto error;
 
 	if (operation == AMDGPU_VA_OP_MAP)
 		r = amdgpu_vm_bo_update(adev, bo_va, false);
 
-error_unreserve:
-	ttm_eu_backoff_reservation(&ticket, &list);
-
-error_print:
+error:
 	if (r && r != -ERESTARTSYS)
 		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
 }
@@ -564,7 +541,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	struct amdgpu_bo_list_entry vm_pd;
 	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
-	struct list_head list, duplicates;
+	struct list_head list;
 	uint32_t invalid_flags, va_flags = 0;
 	int r = 0;
 
@@ -602,14 +579,13 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	abo = gem_to_amdgpu_bo(gobj);
 	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&duplicates);
 	tv.bo = &abo->tbo;
-	tv.shared = true;
+	tv.shared = false;
 	list_add(&tv.head, &list);
 
 	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 	if (r) {
 		drm_gem_object_unreference_unlocked(gobj);
 		return r;
@@ -640,10 +616,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	default:
 		break;
 	}
-	ttm_eu_backoff_reservation(&ticket, &list);
 	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
 	    !amdgpu_vm_debug)
-		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
+		amdgpu_gem_va_update_vm(adev, bo_va, &list, args->operation);
+	ttm_eu_backoff_reservation(&ticket, &list);
 
 	drm_gem_object_unreference_unlocked(gobj);
 	return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index d1aa291b2638..be80a4a68d7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -323,6 +323,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 	struct amdgpu_bo *bo;
 	enum ttm_bo_type type;
 	unsigned long page_align;
+	u64 initial_bytes_moved;
 	size_t acc_size;
 	int r;
 
@@ -374,8 +375,10 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
 	 */
 
+#ifndef CONFIG_COMPILE_TEST
 #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
 	 thanks to write-combining
+#endif
 
 	if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
 		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
@@ -399,12 +402,20 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 		locked = ww_mutex_trylock(&bo->tbo.ttm_resv.lock);
 		WARN_ON(!locked);
 	}
+
+	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
 	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
 			&bo->placement, page_align, !kernel, NULL,
 			acc_size, sg, resv ? resv : &bo->tbo.ttm_resv,
 			&amdgpu_ttm_bo_destroy);
-	if (unlikely(r != 0))
+	amdgpu_cs_report_moved_bytes(adev,
+		atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved);
+
+	if (unlikely(r != 0)) {
+		if (!resv)
+			ww_mutex_unlock(&bo->tbo.resv->lock);
 		return r;
+	}
 
 	bo->tbo.priority = ilog2(bo->tbo.num_pages);
 	if (kernel)
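The amdgpu_object.c hunk brackets ttm_bo_init() with reads of the global moved-bytes counter and reports only the delta, so buffer moves triggered by this allocation are charged to it. A standalone sketch of that bracketing pattern, using C11 atomics in place of the kernel's atomic64_t:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint64_t num_bytes_moved;

    static void do_work(void)
    {
            /* some operation that may migrate buffers as a side effect */
            atomic_fetch_add(&num_bytes_moved, 4096);
    }

    /* Attribute only the bytes moved by this call to this call. */
    static uint64_t run_and_report(void)
    {
            uint64_t before = atomic_load(&num_bytes_moved);

            do_work();
            return atomic_load(&num_bytes_moved) - before;
    }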
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index a61882ddc804..346e80a7119b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1142,12 +1142,22 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
 			/* XXX select vce level based on ring/task */
 			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
 			mutex_unlock(&adev->pm.mutex);
+			amdgpu_pm_compute_clocks(adev);
+			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							AMD_PG_STATE_UNGATE);
+			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							AMD_CG_STATE_UNGATE);
 		} else {
+			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							AMD_PG_STATE_GATE);
+			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							AMD_CG_STATE_GATE);
 			mutex_lock(&adev->pm.mutex);
 			adev->pm.dpm.vce_active = false;
 			mutex_unlock(&adev->pm.mutex);
+			amdgpu_pm_compute_clocks(adev);
 		}
-		amdgpu_pm_compute_clocks(adev);
+
 	}
 }
 
@@ -1286,7 +1296,8 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 	if (!adev->pm.dpm_enabled)
 		return;
 
-	amdgpu_display_bandwidth_update(adev);
+	if (adev->mode_info.num_crtc)
+		amdgpu_display_bandwidth_update(adev);
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 1154b0a8881d..4c6094eefc51 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -529,6 +529,9 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
 	case TTM_PL_TT:
 		break;
 	case TTM_PL_VRAM:
+		if (mem->start == AMDGPU_BO_INVALID_OFFSET)
+			return -EINVAL;
+
 		mem->bus.offset = mem->start << PAGE_SHIFT;
 		/* check if it's visible */
 		if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 6f62ac473064..6d6ab7f11b4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1113,6 +1113,11 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 			amdgpu_dpm_enable_uvd(adev, false);
 		} else {
 			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+			/* shutdown the UVD block */
+			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+						     AMD_PG_STATE_GATE);
+			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+						     AMD_CG_STATE_GATE);
 		}
 	} else {
 		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
@@ -1129,6 +1134,10 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 			amdgpu_dpm_enable_uvd(adev, true);
 		} else {
 			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
+			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+						     AMD_CG_STATE_UNGATE);
+			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+						     AMD_PG_STATE_UNGATE);
 		}
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 79bc9c7aad45..e2c06780ce49 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -321,6 +321,10 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
 			amdgpu_dpm_enable_vce(adev, false);
 		} else {
 			amdgpu_asic_set_vce_clocks(adev, 0, 0);
+			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							AMD_PG_STATE_GATE);
+			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							AMD_CG_STATE_GATE);
 		}
 	} else {
 		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
@@ -346,6 +350,11 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
 			amdgpu_dpm_enable_vce(adev, true);
 		} else {
 			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
+			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							AMD_CG_STATE_UNGATE);
+			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							AMD_PG_STATE_UNGATE);
+
 		}
 	}
 	mutex_unlock(&adev->vce.idle_mutex);
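Across the amdgpu_pm.c, amdgpu_uvd.c and amdgpu_vce.c hunks the same symmetry applies: clock- and power-gating are ungated when a block starts being used and re-gated when it goes idle, in opposite orders. A compressed sketch of that bracketing, with hypothetical enums and stubs rather than the amdgpu interfaces:

    #include <stdio.h>

    enum gate_state { GATE, UNGATE };

    /* stand-ins for the real gating callbacks */
    static void set_clockgating(enum gate_state s) { printf("CG %d\n", s); }
    static void set_powergating(enum gate_state s) { printf("PG %d\n", s); }

    /* begin_use: wake the block before work is submitted */
    static void block_begin_use(void)
    {
            set_clockgating(UNGATE);
            set_powergating(UNGATE);
    }

    /* idle: shut the block down again, in the opposite order */
    static void block_idle(void)
    {
            set_powergating(GATE);
            set_clockgating(GATE);
    }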
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 3fd951c71d1b..dcfb7df3caf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -83,7 +83,6 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
 		amdgpu_vm_bo_rmv(adev, bo_va);
 		ttm_eu_backoff_reservation(&ticket, &list);
-		kfree(bo_va);
 		return r;
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 9498e78b90d7..f97ecb49972e 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -2210,7 +2210,6 @@ static void ci_clear_vc(struct amdgpu_device *adev)
 
 static int ci_upload_firmware(struct amdgpu_device *adev)
 {
-	struct ci_power_info *pi = ci_get_pi(adev);
 	int i, ret;
 
 	if (amdgpu_ci_is_smc_running(adev)) {
@@ -2227,7 +2226,7 @@ static int ci_upload_firmware(struct amdgpu_device *adev)
 	amdgpu_ci_stop_smc_clock(adev);
 	amdgpu_ci_reset_smc(adev);
 
-	ret = amdgpu_ci_load_smc_ucode(adev, pi->sram_end);
+	ret = amdgpu_ci_load_smc_ucode(adev, SMC_RAM_END);
 
 	return ret;
 
@@ -4257,12 +4256,6 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev,
 
 	if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
 		if (amdgpu_new_state->evclk) {
-			/* turn the clocks on when encoding */
-			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-							   AMD_CG_STATE_UNGATE);
-			if (ret)
-				return ret;
-
 			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
 			tmp = RREG32_SMC(ixDPM_TABLE_475);
 			tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
@@ -4274,9 +4267,6 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev,
 			ret = ci_enable_vce_dpm(adev, false);
 			if (ret)
 				return ret;
-			/* turn the clocks off when not encoding */
-			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-							   AMD_CG_STATE_GATE);
 		}
 	}
 	return ret;
@@ -6278,13 +6268,13 @@ static int ci_dpm_sw_init(void *handle)
 	adev->pm.current_mclk = adev->clock.default_mclk;
 	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
 
-	if (amdgpu_dpm == 0)
-		return 0;
-
 	ret = ci_dpm_init_microcode(adev);
 	if (ret)
 		return ret;
 
+	if (amdgpu_dpm == 0)
+		return 0;
+
 	INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
 	mutex_lock(&adev->pm.mutex);
 	ret = ci_dpm_init(adev);
@@ -6328,8 +6318,15 @@ static int ci_dpm_hw_init(void *handle)
 
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!amdgpu_dpm)
+	if (!amdgpu_dpm) {
+		ret = ci_upload_firmware(adev);
+		if (ret) {
+			DRM_ERROR("ci_upload_firmware failed\n");
+			return ret;
+		}
+		ci_dpm_start_smc(adev);
 		return 0;
+	}
 
 	mutex_lock(&adev->pm.mutex);
 	ci_dpm_setup_asic(adev);
@@ -6351,6 +6348,8 @@ static int ci_dpm_hw_fini(void *handle)
 		mutex_lock(&adev->pm.mutex);
 		ci_dpm_disable(adev);
 		mutex_unlock(&adev->pm.mutex);
+	} else {
+		ci_dpm_stop_smc(adev);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 7da688b0d27d..c4d4b35e54ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1176,6 +1176,7 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
 		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
 			/* enable BM */
 			pci_set_master(adev->pdev);
+			adev->has_hw_reset = true;
 			r = 0;
 			break;
 		}
@@ -1722,8 +1723,8 @@ static int cik_common_early_init(void *handle)
 			AMD_PG_SUPPORT_GFX_SMG |
 			AMD_PG_SUPPORT_GFX_DMG |*/
 			AMD_PG_SUPPORT_UVD |
-			/*AMD_PG_SUPPORT_VCE |
-			AMD_PG_SUPPORT_CP |
+			AMD_PG_SUPPORT_VCE |
+			/* AMD_PG_SUPPORT_CP |
 			AMD_PG_SUPPORT_GDS |
 			AMD_PG_SUPPORT_RLC_SMU_HS |
 			AMD_PG_SUPPORT_ACP |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 1cf1d9d1aec1..5b24e89552ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -3737,9 +3737,15 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
 	default:
 		encoder->possible_crtcs = 0x3;
 		break;
+	case 3:
+		encoder->possible_crtcs = 0x7;
+		break;
 	case 4:
 		encoder->possible_crtcs = 0xf;
 		break;
+	case 5:
+		encoder->possible_crtcs = 0x1f;
+		break;
 	case 6:
 		encoder->possible_crtcs = 0x3f;
 		break;
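The new case 3 and case 5 labels fill holes in the crtc-count-to-bitmask switch: an encoder that may drive any of the first N CRTCs gets a mask of the low N bits. A one-function sketch of the general formula the switch enumerates:

    #include <stdint.h>

    /* possible_crtcs for an encoder that can drive any of num_crtc CRTCs:
     * 2 -> 0x3, 3 -> 0x7, 4 -> 0xf, 5 -> 0x1f, 6 -> 0x3f */
    static uint32_t possible_crtcs_mask(unsigned int num_crtc)
    {
            return (1u << num_crtc) - 1;
    }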
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 762f8e82ceb7..e9a176891e13 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -627,11 +627,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs =
 
 static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
 {
-	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-
-	kfree(amdgpu_encoder->enc_priv);
 	drm_encoder_cleanup(encoder);
-	kfree(amdgpu_encoder);
+	kfree(encoder);
 }
 
 static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index c998f6aaaf36..2086e7e68de4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1325,21 +1325,19 @@ static u32 gfx_v6_0_create_bitmask(u32 bit_width)
 	return (u32)(((u64)1 << bit_width) - 1);
 }
 
-static u32 gfx_v6_0_get_rb_disabled(struct amdgpu_device *adev,
-				    u32 max_rb_num_per_se,
-				    u32 sh_per_se)
+static u32 gfx_v6_0_get_rb_active_bitmap(struct amdgpu_device *adev)
 {
 	u32 data, mask;
 
-	data = RREG32(mmCC_RB_BACKEND_DISABLE);
-	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
-	data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+	data = RREG32(mmCC_RB_BACKEND_DISABLE) |
+		RREG32(mmGC_USER_RB_BACKEND_DISABLE);
 
-	data >>= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
+	data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE);
 
-	mask = gfx_v6_0_create_bitmask(max_rb_num_per_se / sh_per_se);
+	mask = gfx_v6_0_create_bitmask(adev->gfx.config.max_backends_per_se/
+					adev->gfx.config.max_sh_per_se);
 
-	return data & mask;
+	return ~data & mask;
 }
 
 static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf)
@@ -1468,68 +1466,55 @@ static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
 	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 }
 
-static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
-			      u32 se_num, u32 sh_per_se,
-			      u32 max_rb_num_per_se)
+static void gfx_v6_0_setup_rb(struct amdgpu_device *adev)
 {
 	int i, j;
-	u32 data, mask;
-	u32 disabled_rbs = 0;
-	u32 enabled_rbs = 0;
+	u32 data;
+	u32 raster_config = 0;
+	u32 active_rbs = 0;
+	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
+					adev->gfx.config.max_sh_per_se;
 	unsigned num_rb_pipes;
 
 	mutex_lock(&adev->grbm_idx_mutex);
-	for (i = 0; i < se_num; i++) {
-		for (j = 0; j < sh_per_se; j++) {
+	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
 			gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
-			data = gfx_v6_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se);
-			disabled_rbs |= data << ((i * sh_per_se + j) * 2);
+			data = gfx_v6_0_get_rb_active_bitmap(adev);
+			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
+					       rb_bitmap_width_per_sh);
 		}
 	}
 	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-	mutex_unlock(&adev->grbm_idx_mutex);
-
-	mask = 1;
-	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
-		if (!(disabled_rbs & mask))
-			enabled_rbs |= mask;
-		mask <<= 1;
-	}
 
-	adev->gfx.config.backend_enable_mask = enabled_rbs;
-	adev->gfx.config.num_rbs = hweight32(enabled_rbs);
+	adev->gfx.config.backend_enable_mask = active_rbs;
+	adev->gfx.config.num_rbs = hweight32(active_rbs);
 
 	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
 			     adev->gfx.config.max_shader_engines, 16);
 
-	mutex_lock(&adev->grbm_idx_mutex);
-	for (i = 0; i < se_num; i++) {
-		gfx_v6_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
-		data = 0;
-		for (j = 0; j < sh_per_se; j++) {
-			switch (enabled_rbs & 3) {
-			case 1:
-				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
-				break;
-			case 2:
-				data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
-				break;
-			case 3:
-			default:
-				data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
-				break;
-			}
-			enabled_rbs >>= 2;
-		}
-		gfx_v6_0_raster_config(adev, &data);
+	gfx_v6_0_raster_config(adev, &raster_config);
 
-		if (!adev->gfx.config.backend_enable_mask ||
-		    adev->gfx.config.num_rbs >= num_rb_pipes)
-			WREG32(mmPA_SC_RASTER_CONFIG, data);
-		else
-			gfx_v6_0_write_harvested_raster_configs(adev, data,
-								adev->gfx.config.backend_enable_mask,
-								num_rb_pipes);
+	if (!adev->gfx.config.backend_enable_mask ||
+	    adev->gfx.config.num_rbs >= num_rb_pipes) {
+		WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
+	} else {
+		gfx_v6_0_write_harvested_raster_configs(adev, raster_config,
+							adev->gfx.config.backend_enable_mask,
+							num_rb_pipes);
+	}
+
+	/* cache the values for userspace */
+	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+			gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+			adev->gfx.config.rb_config[i][j].rb_backend_disable =
+				RREG32(mmCC_RB_BACKEND_DISABLE);
+			adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
+				RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+			adev->gfx.config.rb_config[i][j].raster_config =
+				RREG32(mmPA_SC_RASTER_CONFIG);
+		}
 	}
 	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 	mutex_unlock(&adev->grbm_idx_mutex);
@@ -1540,36 +1525,44 @@ static void gmc_v6_0_init_compute_vmid(struct amdgpu_device *adev)
 }
 */
 
-static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev, u32 cu_per_sh)
+static void gfx_v6_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
+						 u32 bitmap)
 {
-	u32 data, mask;
+	u32 data;
 
-	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
-	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
-	data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
+	if (!bitmap)
+		return;
 
-	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
 
-	mask = gfx_v6_0_create_bitmask(cu_per_sh);
+	WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
+}
 
-	return ~data & mask;
+static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev)
+{
+	u32 data, mask;
+
+	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
+		RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
+
+	mask = gfx_v6_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
+	return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask;
 }
 
 
-static void gfx_v6_0_setup_spi(struct amdgpu_device *adev,
-			       u32 se_num, u32 sh_per_se,
-			       u32 cu_per_sh)
+static void gfx_v6_0_setup_spi(struct amdgpu_device *adev)
 {
 	int i, j, k;
 	u32 data, mask;
 	u32 active_cu = 0;
 
 	mutex_lock(&adev->grbm_idx_mutex);
-	for (i = 0; i < se_num; i++) {
-		for (j = 0; j < sh_per_se; j++) {
+	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
 			gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
 			data = RREG32(mmSPI_STATIC_THREAD_MGMT_3);
-			active_cu = gfx_v6_0_get_cu_enabled(adev, cu_per_sh);
+			active_cu = gfx_v6_0_get_cu_enabled(adev);
 
 			mask = 1;
 			for (k = 0; k < 16; k++) {
@@ -1717,6 +1710,9 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
 		gb_addr_config |= 2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT;
 		break;
 	}
+	gb_addr_config &= ~GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK;
+	if (adev->gfx.config.max_shader_engines == 2)
+		gb_addr_config |= 1 << GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT;
 	adev->gfx.config.gb_addr_config = gb_addr_config;
 
 	WREG32(mmGB_ADDR_CONFIG, gb_addr_config);
@@ -1735,13 +1731,9 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
 #endif
 	gfx_v6_0_tiling_mode_table_init(adev);
 
-	gfx_v6_0_setup_rb(adev, adev->gfx.config.max_shader_engines,
-			   adev->gfx.config.max_sh_per_se,
-			   adev->gfx.config.max_backends_per_se);
+	gfx_v6_0_setup_rb(adev);
 
-	gfx_v6_0_setup_spi(adev, adev->gfx.config.max_shader_engines,
-			    adev->gfx.config.max_sh_per_se,
-			    adev->gfx.config.max_cu_per_sh);
+	gfx_v6_0_setup_spi(adev);
 
 	gfx_v6_0_get_cu_info(adev);
 
@@ -2941,61 +2933,16 @@ static void gfx_v6_0_enable_gfx_cgpg(struct amdgpu_device *adev,
 	}
 }
 
-static u32 gfx_v6_0_get_cu_active_bitmap(struct amdgpu_device *adev,
-					 u32 se, u32 sh)
-{
-
-	u32 mask = 0, tmp, tmp1;
-	int i;
-
-	mutex_lock(&adev->grbm_idx_mutex);
-	gfx_v6_0_select_se_sh(adev, se, sh, 0xffffffff);
-	tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
-	tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
-	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-	mutex_unlock(&adev->grbm_idx_mutex);
-
-	tmp &= 0xffff0000;
-
-	tmp |= tmp1;
-	tmp >>= 16;
-
-	for (i = 0; i < adev->gfx.config.max_cu_per_sh; i ++) {
-		mask <<= 1;
-		mask |= 1;
-	}
-
-	return (~tmp) & mask;
-}
-
 static void gfx_v6_0_init_ao_cu_mask(struct amdgpu_device *adev)
 {
-	u32 i, j, k, active_cu_number = 0;
-
-	u32 mask, counter, cu_bitmap;
-	u32 tmp = 0;
-
-	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
-		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
-			mask = 1;
-			cu_bitmap = 0;
-			counter = 0;
-			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
-				if (gfx_v6_0_get_cu_active_bitmap(adev, i, j) & mask) {
-					if (counter < 2)
-						cu_bitmap |= mask;
-					counter++;
-				}
-				mask <<= 1;
-			}
+	u32 tmp;
 
-			active_cu_number += counter;
-			tmp |= (cu_bitmap << (i * 16 + j * 8));
-		}
-	}
+	WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
 
-	WREG32(mmRLC_PG_AO_CU_MASK, tmp);
-	WREG32_FIELD(RLC_MAX_PG_CU, MAX_POWERED_UP_CU, active_cu_number);
+	tmp = RREG32(mmRLC_MAX_PG_CU);
+	tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
+	tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
+	WREG32(mmRLC_MAX_PG_CU, tmp);
 }
 
 static void gfx_v6_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
@@ -3770,18 +3717,26 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
 	int i, j, k, counter, active_cu_number = 0;
 	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
 	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+	unsigned disable_masks[4 * 2];
 
 	memset(cu_info, 0, sizeof(*cu_info));
 
+	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
+
+	mutex_lock(&adev->grbm_idx_mutex);
 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
 			mask = 1;
 			ao_bitmap = 0;
 			counter = 0;
-			bitmap = gfx_v6_0_get_cu_active_bitmap(adev, i, j);
+			gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+			if (i < 4 && j < 2)
+				gfx_v6_0_set_user_cu_inactive_bitmap(
+					adev, disable_masks[i * 2 + j]);
+			bitmap = gfx_v6_0_get_cu_enabled(adev);
 			cu_info->bitmap[i][j] = bitmap;
 
-			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
+			for (k = 0; k < 16; k++) {
 				if (bitmap & mask) {
 					if (counter < 2)
 						ao_bitmap |= mask;
@@ -3794,6 +3749,9 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
 		}
 	}
 
+	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
 	cu_info->number = active_cu_number;
 	cu_info->ao_cu_mask = ao_cu_mask;
 }
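The gfx_v6_0.c rework repeatedly derives an "active" bitmap by OR-ing the hardware-fused and user-configured disable registers and inverting under a width-limited mask. A minimal standalone sketch of that derivation:

    #include <stdint.h>

    /* width-limited "all ones" mask, safe up to 32 bits */
    static uint32_t bitmask(unsigned width)
    {
            return (uint32_t)(((uint64_t)1 << width) - 1);
    }

    /* a unit is active if neither hw fusing nor user config disabled it */
    static uint32_t active_bitmap(uint32_t hw_disabled, uint32_t user_disabled,
                                  unsigned width)
    {
            return ~(hw_disabled | user_disabled) & bitmask(width);
    }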
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index e3589b55a1e1..1f9354541f29 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1983,6 +1983,14 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
 	WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK |
 	       (3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT));
 	WREG32(mmPA_SC_ENHANCE, PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK);
+
+	tmp = RREG32(mmSPI_ARB_PRIORITY);
+	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2);
+	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2);
+	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2);
+	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2);
+	WREG32(mmSPI_ARB_PRIORITY, tmp);
+
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	udelay(50);
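The SPI_ARB_PRIORITY hunk above, and its twin in gfx_v8_0.c below, are classic read-modify-write updates of packed register fields. A standalone sketch of the field-update helper this style relies on, with simplified mask/shift pairs rather than the generated register headers:

    #include <stdint.h>

    /* hypothetical field layout: 3-bit field at bit 0 */
    #define PIPE_ORDER_TS0_SHIFT 0
    #define PIPE_ORDER_TS0_MASK  (0x7u << PIPE_ORDER_TS0_SHIFT)

    /* clear the field, then OR in the new value - other fields untouched */
    static uint32_t set_field(uint32_t reg, uint32_t mask, unsigned shift,
                              uint32_t val)
    {
            return (reg & ~mask) | ((val << shift) & mask);
    }

    /* usage, mirroring the hunk:
     * tmp = set_field(tmp, PIPE_ORDER_TS0_MASK, PIPE_ORDER_TS0_SHIFT, 2); */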
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 35f9cd83b821..67afc901905c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
| @@ -3898,6 +3898,14 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) | |||
| 3898 | PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) | | 3898 | PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) | |
| 3899 | (adev->gfx.config.sc_earlyz_tile_fifo_size << | 3899 | (adev->gfx.config.sc_earlyz_tile_fifo_size << |
| 3900 | PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)); | 3900 | PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)); |
| 3901 | |||
| 3902 | tmp = RREG32(mmSPI_ARB_PRIORITY); | ||
| 3903 | tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2); | ||
| 3904 | tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2); | ||
| 3905 | tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2); | ||
| 3906 | tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2); | ||
| 3907 | WREG32(mmSPI_ARB_PRIORITY, tmp); | ||
| 3908 | |||
| 3901 | mutex_unlock(&adev->grbm_idx_mutex); | 3909 | mutex_unlock(&adev->grbm_idx_mutex); |
| 3902 | 3910 | ||
| 3903 | } | 3911 | } |
| @@ -7260,7 +7268,7 @@ static void gfx_v8_0_ring_emit_ce_meta_init(struct amdgpu_ring *ring, uint64_t c | |||
| 7260 | static union { | 7268 | static union { |
| 7261 | struct amdgpu_ce_ib_state regular; | 7269 | struct amdgpu_ce_ib_state regular; |
| 7262 | struct amdgpu_ce_ib_state_chained_ib chained; | 7270 | struct amdgpu_ce_ib_state_chained_ib chained; |
| 7263 | } ce_payload = {0}; | 7271 | } ce_payload = {}; |
| 7264 | 7272 | ||
| 7265 | if (ring->adev->virt.chained_ib_support) { | 7273 | if (ring->adev->virt.chained_ib_support) { |
| 7266 | ce_payload_addr = csa_addr + offsetof(struct amdgpu_gfx_meta_data_chained_ib, ce_payload); | 7274 | ce_payload_addr = csa_addr + offsetof(struct amdgpu_gfx_meta_data_chained_ib, ce_payload); |
| @@ -7287,7 +7295,7 @@ static void gfx_v8_0_ring_emit_de_meta_init(struct amdgpu_ring *ring, uint64_t c | |||
| 7287 | static union { | 7295 | static union { |
| 7288 | struct amdgpu_de_ib_state regular; | 7296 | struct amdgpu_de_ib_state regular; |
| 7289 | struct amdgpu_de_ib_state_chained_ib chained; | 7297 | struct amdgpu_de_ib_state_chained_ib chained; |
| 7290 | } de_payload = {0}; | 7298 | } de_payload = {}; |
| 7291 | 7299 | ||
| 7292 | gds_addr = csa_addr + 4096; | 7300 | gds_addr = csa_addr + 4096; |
| 7293 | if (ring->adev->virt.chained_ib_support) { | 7301 | if (ring->adev->virt.chained_ib_support) { |
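Besides the same SPI arbiter bump as gfx v7, the two meta-init emitters switch their static unions from `{0}` to `{}`. Both zero the object, but with `{0}` GCC's -Wmissing-braces can complain when the union's first member is itself a struct; the empty initializer (a GNU extension, later standardized in C23) avoids that. A toy reproduction:

```c
#include <stdio.h>

struct state { int a, b; };

union payload {
	struct state regular;
	long chained;
};

int main(void)
{
	/* union payload p = {0}; can trip -Wmissing-braces because the
	 * first member is a struct; {} zero-initializes the whole union
	 * without naming a member. */
	union payload p = {};

	printf("%d %d\n", p.regular.a, p.regular.b);	/* 0 0 */
	return 0;
}
```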
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index e2b0b1646f99..0635829b18cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | |||
| @@ -254,6 +254,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) | |||
| 254 | } | 254 | } |
| 255 | WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); | 255 | WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); |
| 256 | 256 | ||
| 257 | if (adev->mode_info.num_crtc) | ||
| 258 | amdgpu_display_set_vga_render_state(adev, false); | ||
| 259 | |||
| 257 | gmc_v6_0_mc_stop(adev, &save); | 260 | gmc_v6_0_mc_stop(adev, &save); |
| 258 | 261 | ||
| 259 | if (gmc_v6_0_wait_for_idle((void *)adev)) { | 262 | if (gmc_v6_0_wait_for_idle((void *)adev)) { |
| @@ -283,7 +286,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) | |||
| 283 | dev_warn(adev->dev, "Wait for MC idle timed out!\n"); | 286 | dev_warn(adev->dev, "Wait for MC idle timed out!\n"); |
| 284 | } | 287 | } |
| 285 | gmc_v6_0_mc_resume(adev, &save); | 288 | gmc_v6_0_mc_resume(adev, &save); |
| 286 | amdgpu_display_set_vga_render_state(adev, false); | ||
| 287 | } | 289 | } |
| 288 | 290 | ||
| 289 | static int gmc_v6_0_mc_init(struct amdgpu_device *adev) | 291 | static int gmc_v6_0_mc_init(struct amdgpu_device *adev) |
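In gmc_v6_0_mc_program() the VGA render-state disable moves from after mc_resume() to before mc_stop(), and is now skipped entirely on CRTC-less (headless) parts. Condensed, the new ordering is:

```c
/* Condensed from the hunk; register programming and error paths elided. */
if (adev->mode_info.num_crtc)
	/* stop VGA render output before the MC is halted */
	amdgpu_display_set_vga_render_state(adev, false);

gmc_v6_0_mc_stop(adev, &save);
/* ... reprogram the FB location while the MC is idle ... */
gmc_v6_0_mc_resume(adev, &save);
/* the old code only disabled VGA render here, after the MC was back up */
```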
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 8785ca570729..f5a343cb0010 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c | |||
| @@ -1550,11 +1550,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, | |||
| 1550 | 1550 | ||
| 1551 | if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { | 1551 | if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { |
| 1552 | kv_dpm_powergate_vce(adev, false); | 1552 | kv_dpm_powergate_vce(adev, false); |
| 1553 | /* turn the clocks on when encoding */ | ||
| 1554 | ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, | ||
| 1555 | AMD_CG_STATE_UNGATE); | ||
| 1556 | if (ret) | ||
| 1557 | return ret; | ||
| 1558 | if (pi->caps_stable_p_state) | 1553 | if (pi->caps_stable_p_state) |
| 1559 | pi->vce_boot_level = table->count - 1; | 1554 | pi->vce_boot_level = table->count - 1; |
| 1560 | else | 1555 | else |
| @@ -1573,15 +1568,9 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, | |||
| 1573 | amdgpu_kv_send_msg_to_smc_with_parameter(adev, | 1568 | amdgpu_kv_send_msg_to_smc_with_parameter(adev, |
| 1574 | PPSMC_MSG_VCEDPM_SetEnabledMask, | 1569 | PPSMC_MSG_VCEDPM_SetEnabledMask, |
| 1575 | (1 << pi->vce_boot_level)); | 1570 | (1 << pi->vce_boot_level)); |
| 1576 | |||
| 1577 | kv_enable_vce_dpm(adev, true); | 1571 | kv_enable_vce_dpm(adev, true); |
| 1578 | } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { | 1572 | } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { |
| 1579 | kv_enable_vce_dpm(adev, false); | 1573 | kv_enable_vce_dpm(adev, false); |
| 1580 | /* turn the clocks off when not encoding */ | ||
| 1581 | ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, | ||
| 1582 | AMD_CG_STATE_GATE); | ||
| 1583 | if (ret) | ||
| 1584 | return ret; | ||
| 1585 | kv_dpm_powergate_vce(adev, true); | 1574 | kv_dpm_powergate_vce(adev, true); |
| 1586 | } | 1575 | } |
| 1587 | 1576 | ||
| @@ -1688,70 +1677,44 @@ static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) | |||
| 1688 | struct kv_power_info *pi = kv_get_pi(adev); | 1677 | struct kv_power_info *pi = kv_get_pi(adev); |
| 1689 | int ret; | 1678 | int ret; |
| 1690 | 1679 | ||
| 1691 | if (pi->uvd_power_gated == gate) | ||
| 1692 | return; | ||
| 1693 | |||
| 1694 | pi->uvd_power_gated = gate; | 1680 | pi->uvd_power_gated = gate; |
| 1695 | 1681 | ||
| 1696 | if (gate) { | 1682 | if (gate) { |
| 1697 | if (pi->caps_uvd_pg) { | 1683 | /* stop the UVD block */ |
| 1698 | /* disable clockgating so we can properly shut down the block */ | 1684 | ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, |
| 1699 | ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, | 1685 | AMD_PG_STATE_GATE); |
| 1700 | AMD_CG_STATE_UNGATE); | ||
| 1701 | /* shutdown the UVD block */ | ||
| 1702 | ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, | ||
| 1703 | AMD_PG_STATE_GATE); | ||
| 1704 | /* XXX: check for errors */ | ||
| 1705 | } | ||
| 1706 | kv_update_uvd_dpm(adev, gate); | 1686 | kv_update_uvd_dpm(adev, gate); |
| 1707 | if (pi->caps_uvd_pg) | 1687 | if (pi->caps_uvd_pg) |
| 1708 | /* power off the UVD block */ | 1688 | /* power off the UVD block */ |
| 1709 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF); | 1689 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF); |
| 1710 | } else { | 1690 | } else { |
| 1711 | if (pi->caps_uvd_pg) { | 1691 | if (pi->caps_uvd_pg) |
| 1712 | /* power on the UVD block */ | 1692 | /* power on the UVD block */ |
| 1713 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); | 1693 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); |
| 1714 | /* re-init the UVD block */ | 1694 | /* re-init the UVD block */ |
| 1715 | ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, | ||
| 1716 | AMD_PG_STATE_UNGATE); | ||
| 1717 | /* enable clockgating. hw will dynamically gate/ungate clocks on the fly */ | ||
| 1718 | ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, | ||
| 1719 | AMD_CG_STATE_GATE); | ||
| 1720 | /* XXX: check for errors */ | ||
| 1721 | } | ||
| 1722 | kv_update_uvd_dpm(adev, gate); | 1695 | kv_update_uvd_dpm(adev, gate); |
| 1696 | |||
| 1697 | ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, | ||
| 1698 | AMD_PG_STATE_UNGATE); | ||
| 1723 | } | 1699 | } |
| 1724 | } | 1700 | } |
| 1725 | 1701 | ||
| 1726 | static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) | 1702 | static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) |
| 1727 | { | 1703 | { |
| 1728 | struct kv_power_info *pi = kv_get_pi(adev); | 1704 | struct kv_power_info *pi = kv_get_pi(adev); |
| 1729 | int ret; | ||
| 1730 | 1705 | ||
| 1731 | if (pi->vce_power_gated == gate) | 1706 | if (pi->vce_power_gated == gate) |
| 1732 | return; | 1707 | return; |
| 1733 | 1708 | ||
| 1734 | pi->vce_power_gated = gate; | 1709 | pi->vce_power_gated = gate; |
| 1735 | 1710 | ||
| 1736 | if (gate) { | 1711 | if (!pi->caps_vce_pg) |
| 1737 | if (pi->caps_vce_pg) { | 1712 | return; |
| 1738 | /* shutdown the VCE block */ | 1713 | |
| 1739 | ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, | 1714 | if (gate) |
| 1740 | AMD_PG_STATE_GATE); | 1715 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); |
| 1741 | /* XXX: check for errors */ | 1716 | else |
| 1742 | /* power off the VCE block */ | 1717 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); |
| 1743 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); | ||
| 1744 | } | ||
| 1745 | } else { | ||
| 1746 | if (pi->caps_vce_pg) { | ||
| 1747 | /* power on the VCE block */ | ||
| 1748 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); | ||
| 1749 | /* re-init the VCE block */ | ||
| 1750 | ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, | ||
| 1751 | AMD_PG_STATE_UNGATE); | ||
| 1752 | /* XXX: check for errors */ | ||
| 1753 | } | ||
| 1754 | } | ||
| 1755 | } | 1718 | } |
| 1756 | 1719 | ||
| 1757 | static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) | 1720 | static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) |
| @@ -3009,8 +2972,7 @@ static int kv_dpm_late_init(void *handle) | |||
| 3009 | 2972 | ||
| 3010 | kv_dpm_powergate_acp(adev, true); | 2973 | kv_dpm_powergate_acp(adev, true); |
| 3011 | kv_dpm_powergate_samu(adev, true); | 2974 | kv_dpm_powergate_samu(adev, true); |
| 3012 | kv_dpm_powergate_vce(adev, true); | 2975 | |
| 3013 | kv_dpm_powergate_uvd(adev, true); | ||
| 3014 | return 0; | 2976 | return 0; |
| 3015 | } | 2977 | } |
| 3016 | 2978 | ||
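kv_dpm stops driving UVD/VCE clockgating by hand: the explicit AMD_CG_STATE_UNGATE/GATE calls around encode sessions are gone, the powergate helpers defer to the IP block's powergating hook, and late_init no longer force-gates UVD/VCE at boot. The UVD gate/ungate flow now reduces to the following (condensed from the hunk):

```c
/* Condensed gate/ungate flow from kv_dpm_powergate_uvd(); note the
 * early return on an unchanged state is gone, so the sequence re-runs
 * even when the cached flag already matches: */
pi->uvd_power_gated = gate;

if (gate) {
	/* stop the UVD block via the IP powergating hook */
	amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
				     AMD_PG_STATE_GATE);
	kv_update_uvd_dpm(adev, gate);
	if (pi->caps_uvd_pg)	/* then cut power in the SMU */
		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF);
} else {
	if (pi->caps_uvd_pg)	/* power on first, then re-init */
		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
	kv_update_uvd_dpm(adev, gate);
	amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
				     AMD_PG_STATE_UNGATE);
}
```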
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index da46992f7b18..b71e3faa40db 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c | |||
| @@ -1010,24 +1010,81 @@ static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = { | |||
| 1010 | {PA_SC_RASTER_CONFIG, false, true}, | 1010 | {PA_SC_RASTER_CONFIG, false, true}, |
| 1011 | }; | 1011 | }; |
| 1012 | 1012 | ||
| 1013 | static uint32_t si_read_indexed_register(struct amdgpu_device *adev, | 1013 | static uint32_t si_get_register_value(struct amdgpu_device *adev, |
| 1014 | u32 se_num, u32 sh_num, | 1014 | bool indexed, u32 se_num, |
| 1015 | u32 reg_offset) | 1015 | u32 sh_num, u32 reg_offset) |
| 1016 | { | 1016 | { |
| 1017 | uint32_t val; | 1017 | if (indexed) { |
| 1018 | uint32_t val; | ||
| 1019 | unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num; | ||
| 1020 | unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num; | ||
| 1021 | |||
| 1022 | switch (reg_offset) { | ||
| 1023 | case mmCC_RB_BACKEND_DISABLE: | ||
| 1024 | return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable; | ||
| 1025 | case mmGC_USER_RB_BACKEND_DISABLE: | ||
| 1026 | return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable; | ||
| 1027 | case mmPA_SC_RASTER_CONFIG: | ||
| 1028 | return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config; | ||
| 1029 | } | ||
| 1018 | 1030 | ||
| 1019 | mutex_lock(&adev->grbm_idx_mutex); | 1031 | mutex_lock(&adev->grbm_idx_mutex); |
| 1020 | if (se_num != 0xffffffff || sh_num != 0xffffffff) | 1032 | if (se_num != 0xffffffff || sh_num != 0xffffffff) |
| 1021 | amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); | 1033 | amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); |
| 1022 | 1034 | ||
| 1023 | val = RREG32(reg_offset); | 1035 | val = RREG32(reg_offset); |
| 1024 | 1036 | ||
| 1025 | if (se_num != 0xffffffff || sh_num != 0xffffffff) | 1037 | if (se_num != 0xffffffff || sh_num != 0xffffffff) |
| 1026 | amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); | 1038 | amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); |
| 1027 | mutex_unlock(&adev->grbm_idx_mutex); | 1039 | mutex_unlock(&adev->grbm_idx_mutex); |
| 1028 | return val; | 1040 | return val; |
| 1041 | } else { | ||
| 1042 | unsigned idx; | ||
| 1043 | |||
| 1044 | switch (reg_offset) { | ||
| 1045 | case mmGB_ADDR_CONFIG: | ||
| 1046 | return adev->gfx.config.gb_addr_config; | ||
| 1047 | case mmMC_ARB_RAMCFG: | ||
| 1048 | return adev->gfx.config.mc_arb_ramcfg; | ||
| 1049 | case mmGB_TILE_MODE0: | ||
| 1050 | case mmGB_TILE_MODE1: | ||
| 1051 | case mmGB_TILE_MODE2: | ||
| 1052 | case mmGB_TILE_MODE3: | ||
| 1053 | case mmGB_TILE_MODE4: | ||
| 1054 | case mmGB_TILE_MODE5: | ||
| 1055 | case mmGB_TILE_MODE6: | ||
| 1056 | case mmGB_TILE_MODE7: | ||
| 1057 | case mmGB_TILE_MODE8: | ||
| 1058 | case mmGB_TILE_MODE9: | ||
| 1059 | case mmGB_TILE_MODE10: | ||
| 1060 | case mmGB_TILE_MODE11: | ||
| 1061 | case mmGB_TILE_MODE12: | ||
| 1062 | case mmGB_TILE_MODE13: | ||
| 1063 | case mmGB_TILE_MODE14: | ||
| 1064 | case mmGB_TILE_MODE15: | ||
| 1065 | case mmGB_TILE_MODE16: | ||
| 1066 | case mmGB_TILE_MODE17: | ||
| 1067 | case mmGB_TILE_MODE18: | ||
| 1068 | case mmGB_TILE_MODE19: | ||
| 1069 | case mmGB_TILE_MODE20: | ||
| 1070 | case mmGB_TILE_MODE21: | ||
| 1071 | case mmGB_TILE_MODE22: | ||
| 1072 | case mmGB_TILE_MODE23: | ||
| 1073 | case mmGB_TILE_MODE24: | ||
| 1074 | case mmGB_TILE_MODE25: | ||
| 1075 | case mmGB_TILE_MODE26: | ||
| 1076 | case mmGB_TILE_MODE27: | ||
| 1077 | case mmGB_TILE_MODE28: | ||
| 1078 | case mmGB_TILE_MODE29: | ||
| 1079 | case mmGB_TILE_MODE30: | ||
| 1080 | case mmGB_TILE_MODE31: | ||
| 1081 | idx = (reg_offset - mmGB_TILE_MODE0); | ||
| 1082 | return adev->gfx.config.tile_mode_array[idx]; | ||
| 1083 | default: | ||
| 1084 | return RREG32(reg_offset); | ||
| 1085 | } | ||
| 1086 | } | ||
| 1029 | } | 1087 | } |
| 1030 | |||
| 1031 | static int si_read_register(struct amdgpu_device *adev, u32 se_num, | 1088 | static int si_read_register(struct amdgpu_device *adev, u32 se_num, |
| 1032 | u32 sh_num, u32 reg_offset, u32 *value) | 1089 | u32 sh_num, u32 reg_offset, u32 *value) |
| 1033 | { | 1090 | { |
| @@ -1039,10 +1096,9 @@ static int si_read_register(struct amdgpu_device *adev, u32 se_num, | |||
| 1039 | continue; | 1096 | continue; |
| 1040 | 1097 | ||
| 1041 | if (!si_allowed_read_registers[i].untouched) | 1098 | if (!si_allowed_read_registers[i].untouched) |
| 1042 | *value = si_allowed_read_registers[i].grbm_indexed ? | 1099 | *value = si_get_register_value(adev, |
| 1043 | si_read_indexed_register(adev, se_num, | 1100 | si_allowed_read_registers[i].grbm_indexed, |
| 1044 | sh_num, reg_offset) : | 1101 | se_num, sh_num, reg_offset); |
| 1045 | RREG32(reg_offset); | ||
| 1046 | return 0; | 1102 | return 0; |
| 1047 | } | 1103 | } |
| 1048 | return -EINVAL; | 1104 | return -EINVAL; |
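si_read_register() is reworked around si_get_register_value(), which serves most allowed reads from values cached at init (gb_addr_config, mc_arb_ramcfg, the per-SE/SH rb_config entries, and the 32 tile modes) and only falls back to a GRBM-indexed RREG32 under grbm_idx_mutex. The 32-way GB_TILE_MODE switch could equally be a range test; a standalone sketch with made-up offsets:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical offsets standing in for mmGB_TILE_MODE0..31,
 * which are contiguous in the real register map too. */
enum { GB_TILE_MODE0 = 0x2644, GB_TILE_MODE31 = GB_TILE_MODE0 + 31 };

static uint32_t tile_mode_array[32];	/* filled once at init */

/* Range form of the 32-way switch in si_get_register_value(). */
static int cached_tile_mode(uint32_t reg, uint32_t *val)
{
	if (reg >= GB_TILE_MODE0 && reg <= GB_TILE_MODE31) {
		*val = tile_mode_array[reg - GB_TILE_MODE0];
		return 0;
	}
	return -1;	/* caller falls back to a real register read */
}

int main(void)
{
	uint32_t v;

	tile_mode_array[3] = 0xdeadbeef;
	if (!cached_tile_mode(GB_TILE_MODE0 + 3, &v))
		printf("0x%x\n", v);	/* 0xdeadbeef */
	return 0;
}
```

The switch form was presumably kept because the mm* offsets are independent macros, so a switch stays correct even if they were ever non-contiguous.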
diff --git a/drivers/gpu/drm/amd/amdgpu/si_enums.h b/drivers/gpu/drm/amd/amdgpu/si_enums.h index fde2086246fa..dc9e0e6b4558 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_enums.h +++ b/drivers/gpu/drm/amd/amdgpu/si_enums.h | |||
| @@ -143,8 +143,8 @@ | |||
| 143 | #define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D | 143 | #define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D |
| 144 | 144 | ||
| 145 | #define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003 | 145 | #define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003 |
| 146 | #define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002 | 146 | #define VERDE_GB_ADDR_CONFIG_GOLDEN 0x02010002 |
| 147 | #define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001 | 147 | #define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02011003 |
| 148 | 148 | ||
| 149 | #define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \ | 149 | #define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \ |
| 150 | (((op) & 0xFF) << 8) | \ | 150 | (((op) & 0xFF) << 8) | \ |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 7fb9137dd89b..b34cefc7ebd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | |||
| @@ -159,9 +159,6 @@ static int uvd_v4_2_hw_init(void *handle) | |||
| 159 | 159 | ||
| 160 | uvd_v4_2_enable_mgcg(adev, true); | 160 | uvd_v4_2_enable_mgcg(adev, true); |
| 161 | amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); | 161 | amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); |
| 162 | r = uvd_v4_2_start(adev); | ||
| 163 | if (r) | ||
| 164 | goto done; | ||
| 165 | 162 | ||
| 166 | ring->ready = true; | 163 | ring->ready = true; |
| 167 | r = amdgpu_ring_test_ring(ring); | 164 | r = amdgpu_ring_test_ring(ring); |
| @@ -198,7 +195,6 @@ static int uvd_v4_2_hw_init(void *handle) | |||
| 198 | amdgpu_ring_commit(ring); | 195 | amdgpu_ring_commit(ring); |
| 199 | 196 | ||
| 200 | done: | 197 | done: |
| 201 | |||
| 202 | if (!r) | 198 | if (!r) |
| 203 | DRM_INFO("UVD initialized successfully.\n"); | 199 | DRM_INFO("UVD initialized successfully.\n"); |
| 204 | 200 | ||
| @@ -217,7 +213,9 @@ static int uvd_v4_2_hw_fini(void *handle) | |||
| 217 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 213 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 218 | struct amdgpu_ring *ring = &adev->uvd.ring; | 214 | struct amdgpu_ring *ring = &adev->uvd.ring; |
| 219 | 215 | ||
| 220 | uvd_v4_2_stop(adev); | 216 | if (RREG32(mmUVD_STATUS) != 0) |
| 217 | uvd_v4_2_stop(adev); | ||
| 218 | |||
| 221 | ring->ready = false; | 219 | ring->ready = false; |
| 222 | 220 | ||
| 223 | return 0; | 221 | return 0; |
| @@ -267,37 +265,26 @@ static int uvd_v4_2_start(struct amdgpu_device *adev) | |||
| 267 | struct amdgpu_ring *ring = &adev->uvd.ring; | 265 | struct amdgpu_ring *ring = &adev->uvd.ring; |
| 268 | uint32_t rb_bufsz; | 266 | uint32_t rb_bufsz; |
| 269 | int i, j, r; | 267 | int i, j, r; |
| 268 | u32 tmp; | ||
| 270 | /* disable byte swapping */ | 269 | /* disable byte swapping */ |
| 271 | u32 lmi_swap_cntl = 0; | 270 | u32 lmi_swap_cntl = 0; |
| 272 | u32 mp_swap_cntl = 0; | 271 | u32 mp_swap_cntl = 0; |
| 273 | 272 | ||
| 274 | WREG32(mmUVD_CGC_GATE, 0); | 273 | /* set uvd busy */ |
| 275 | uvd_v4_2_set_dcm(adev, true); | 274 | WREG32_P(mmUVD_STATUS, 1<<2, ~(1<<2)); |
| 276 | |||
| 277 | uvd_v4_2_mc_resume(adev); | ||
| 278 | 275 | ||
| 279 | /* disable interrupt */ | 276 | uvd_v4_2_set_dcm(adev, true); |
| 280 | WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1)); | 277 | WREG32(mmUVD_CGC_GATE, 0); |
| 281 | |||
| 282 | /* Stall UMC and register bus before resetting VCPU */ | ||
| 283 | WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); | ||
| 284 | mdelay(1); | ||
| 285 | |||
| 286 | /* put LMI, VCPU, RBC etc... into reset */ | ||
| 287 | WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | | ||
| 288 | UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | | ||
| 289 | UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | | ||
| 290 | UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | | ||
| 291 | UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); | ||
| 292 | mdelay(5); | ||
| 293 | 278 | ||
| 294 | /* take UVD block out of reset */ | 279 | /* take UVD block out of reset */ |
| 295 | WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); | 280 | WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); |
| 296 | mdelay(5); | 281 | mdelay(5); |
| 297 | 282 | ||
| 298 | /* initialize UVD memory controller */ | 283 | /* enable VCPU clock */ |
| 299 | WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | | 284 | WREG32(mmUVD_VCPU_CNTL, 1 << 9); |
| 300 | (1 << 21) | (1 << 9) | (1 << 20)); | 285 | |
| 286 | /* disable interrupt */ | ||
| 287 | WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1)); | ||
| 301 | 288 | ||
| 302 | #ifdef __BIG_ENDIAN | 289 | #ifdef __BIG_ENDIAN |
| 303 | /* swap (8 in 32) RB and IB */ | 290 | /* swap (8 in 32) RB and IB */ |
| @@ -306,6 +293,11 @@ static int uvd_v4_2_start(struct amdgpu_device *adev) | |||
| 306 | #endif | 293 | #endif |
| 307 | WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl); | 294 | WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl); |
| 308 | WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl); | 295 | WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl); |
| 296 | /* initialize UVD memory controller */ | ||
| 297 | WREG32(mmUVD_LMI_CTRL, 0x203108); | ||
| 298 | |||
| 299 | tmp = RREG32(mmUVD_MPC_CNTL); | ||
| 300 | WREG32(mmUVD_MPC_CNTL, tmp | 0x10); | ||
| 309 | 301 | ||
| 310 | WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040); | 302 | WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040); |
| 311 | WREG32(mmUVD_MPC_SET_MUXA1, 0x0); | 303 | WREG32(mmUVD_MPC_SET_MUXA1, 0x0); |
| @@ -314,18 +306,20 @@ static int uvd_v4_2_start(struct amdgpu_device *adev) | |||
| 314 | WREG32(mmUVD_MPC_SET_ALU, 0); | 306 | WREG32(mmUVD_MPC_SET_ALU, 0); |
| 315 | WREG32(mmUVD_MPC_SET_MUX, 0x88); | 307 | WREG32(mmUVD_MPC_SET_MUX, 0x88); |
| 316 | 308 | ||
| 317 | /* take all subblocks out of reset, except VCPU */ | 309 | uvd_v4_2_mc_resume(adev); |
| 318 | WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); | ||
| 319 | mdelay(5); | ||
| 320 | 310 | ||
| 321 | /* enable VCPU clock */ | 311 | tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL); |
| 322 | WREG32(mmUVD_VCPU_CNTL, 1 << 9); | 312 | WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10)); |
| 323 | 313 | ||
| 324 | /* enable UMC */ | 314 | /* enable UMC */ |
| 325 | WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); | 315 | WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); |
| 326 | 316 | ||
| 327 | /* boot up the VCPU */ | 317 | WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK); |
| 328 | WREG32(mmUVD_SOFT_RESET, 0); | 318 | |
| 319 | WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); | ||
| 320 | |||
| 321 | WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); | ||
| 322 | |||
| 329 | mdelay(10); | 323 | mdelay(10); |
| 330 | 324 | ||
| 331 | for (i = 0; i < 10; ++i) { | 325 | for (i = 0; i < 10; ++i) { |
| @@ -357,6 +351,8 @@ static int uvd_v4_2_start(struct amdgpu_device *adev) | |||
| 357 | /* enable interrupt */ | 351 | /* enable interrupt */ |
| 358 | WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1)); | 352 | WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1)); |
| 359 | 353 | ||
| 354 | WREG32_P(mmUVD_STATUS, 0, ~(1<<2)); | ||
| 355 | |||
| 360 | /* force RBC into idle state */ | 356 | /* force RBC into idle state */ |
| 361 | WREG32(mmUVD_RBC_RB_CNTL, 0x11010101); | 357 | WREG32(mmUVD_RBC_RB_CNTL, 0x11010101); |
| 362 | 358 | ||
| @@ -393,22 +389,57 @@ static int uvd_v4_2_start(struct amdgpu_device *adev) | |||
| 393 | */ | 389 | */ |
| 394 | static void uvd_v4_2_stop(struct amdgpu_device *adev) | 390 | static void uvd_v4_2_stop(struct amdgpu_device *adev) |
| 395 | { | 391 | { |
| 396 | /* force RBC into idle state */ | 392 | uint32_t i, j; |
| 393 | uint32_t status; | ||
| 394 | |||
| 397 | WREG32(mmUVD_RBC_RB_CNTL, 0x11010101); | 395 | WREG32(mmUVD_RBC_RB_CNTL, 0x11010101); |
| 398 | 396 | ||
| 397 | for (i = 0; i < 10; ++i) { | ||
| 398 | for (j = 0; j < 100; ++j) { | ||
| 399 | status = RREG32(mmUVD_STATUS); | ||
| 400 | if (status & 2) | ||
| 401 | break; | ||
| 402 | mdelay(1); | ||
| 403 | } | ||
| 404 | if (status & 2) | ||
| 405 | break; | ||
| 406 | } | ||
| 407 | |||
| 408 | for (i = 0; i < 10; ++i) { | ||
| 409 | for (j = 0; j < 100; ++j) { | ||
| 410 | status = RREG32(mmUVD_LMI_STATUS); | ||
| 411 | if (status & 0xf) | ||
| 412 | break; | ||
| 413 | mdelay(1); | ||
| 414 | } | ||
| 415 | if (status & 0xf) | ||
| 416 | break; | ||
| 417 | } | ||
| 418 | |||
| 399 | /* Stall UMC and register bus before resetting VCPU */ | 419 | /* Stall UMC and register bus before resetting VCPU */ |
| 400 | WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); | 420 | WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); |
| 401 | mdelay(1); | ||
| 402 | 421 | ||
| 403 | /* put VCPU into reset */ | 422 | for (i = 0; i < 10; ++i) { |
| 404 | WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); | 423 | for (j = 0; j < 100; ++j) { |
| 405 | mdelay(5); | 424 | status = RREG32(mmUVD_LMI_STATUS); |
| 425 | if (status & 0x240) | ||
| 426 | break; | ||
| 427 | mdelay(1); | ||
| 428 | } | ||
| 429 | if (status & 0x240) | ||
| 430 | break; | ||
| 431 | } | ||
| 406 | 432 | ||
| 407 | /* disable VCPU clock */ | 433 | WREG32_P(0x3D49, 0, ~(1 << 2)); |
| 408 | WREG32(mmUVD_VCPU_CNTL, 0x0); | ||
| 409 | 434 | ||
| 410 | /* Unstall UMC and register bus */ | 435 | WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9)); |
| 411 | WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); | 436 | |
| 437 | /* put LMI, VCPU, RBC etc... into reset */ | ||
| 438 | WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | | ||
| 439 | UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | | ||
| 440 | UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); | ||
| 441 | |||
| 442 | WREG32(mmUVD_STATUS, 0); | ||
| 412 | 443 | ||
| 413 | uvd_v4_2_set_dcm(adev, false); | 444 | uvd_v4_2_set_dcm(adev, false); |
| 414 | } | 445 | } |
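uvd_v4_2_stop() no longer yanks the block into reset immediately: it parks the RBC, then polls UVD_STATUS and UVD_LMI_STATUS for idle/clean bits before stalling the UMC and asserting the soft resets. The same nested retry loop appears three times; factored out as a host-testable helper (the helper name and the stubs are hypothetical, the driver open-codes the loops):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Host-side stubs standing in for the kernel's RREG32()/mdelay(). */
static int reads;
static uint32_t RREG32(uint32_t reg) { (void)reg; return ++reads > 5 ? 2 : 0; }
static void mdelay(unsigned ms) { (void)ms; }

/* The polling idiom uvd_v4_2_stop() uses three times: retry up to
 * 10 * 100 times, 1 ms apart, for any bit of 'mask' to assert.
 * (The driver's outer loop also bails on the same condition, so the
 * behavior is the same flat retry budget.) */
static bool uvd_poll_status(uint32_t reg, uint32_t mask)
{
	int i, j;

	for (i = 0; i < 10; ++i)
		for (j = 0; j < 100; ++j) {
			if (RREG32(reg) & mask)
				return true;
			mdelay(1);
		}
	return false;
}

int main(void)
{
	/* e.g. wait for the UVD idle bit (UVD_STATUS bit 1, value 2);
	 * 0x1234 is a placeholder offset, not the real mmUVD_STATUS. */
	printf("%s\n", uvd_poll_status(0x1234, 2) ? "idle" : "timeout");
	return 0;
}
```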
| @@ -694,8 +725,26 @@ static int uvd_v4_2_set_powergating_state(void *handle, | |||
| 694 | 725 | ||
| 695 | if (state == AMD_PG_STATE_GATE) { | 726 | if (state == AMD_PG_STATE_GATE) { |
| 696 | uvd_v4_2_stop(adev); | 727 | uvd_v4_2_stop(adev); |
| 728 | if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) { | ||
| 729 | if (!(RREG32_SMC(ixCURRENT_PG_STATUS) & | ||
| 730 | CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) { | ||
| 731 | WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK | | ||
| 732 | UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK | | ||
| 733 | UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK)); | ||
| 734 | mdelay(20); | ||
| 735 | } | ||
| 736 | } | ||
| 697 | return 0; | 737 | return 0; |
| 698 | } else { | 738 | } else { |
| 739 | if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) { | ||
| 740 | if (RREG32_SMC(ixCURRENT_PG_STATUS) & | ||
| 741 | CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) { | ||
| 742 | WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK | | ||
| 743 | UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK | | ||
| 744 | UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK)); | ||
| 745 | mdelay(30); | ||
| 746 | } | ||
| 747 | } | ||
| 699 | return uvd_v4_2_start(adev); | 748 | return uvd_v4_2_start(adev); |
| 700 | } | 749 | } |
| 701 | } | 750 | } |
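Powergating for UVD v4.2 becomes real: when DPM is disabled, the handler checks the SMC's live CURRENT_PG_STATUS and, only on an actual state change, drives the UVD power FSM through UVD_PGFSM_CONFIG. The gate path, condensed from the hunk:

```c
/* Gate path from uvd_v4_2_set_powergating_state(), condensed;
 * only taken when PG is supported and amdgpu_dpm == 0: */
uvd_v4_2_stop(adev);
if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) {
	if (!(RREG32_SMC(ixCURRENT_PG_STATUS) &
	      CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) {
		/* ask the PGFSM to power the island down */
		WREG32(mmUVD_PGFSM_CONFIG,
		       UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
		       UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK |
		       UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK);
		mdelay(20);
	}
}
/* the ungate path mirrors this with POWER_UP and a 30 ms settle,
 * then calls uvd_v4_2_start() */
```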
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 9b49824233ae..ad8c02e423d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | |||
| @@ -152,9 +152,9 @@ static int uvd_v5_0_hw_init(void *handle) | |||
| 152 | uint32_t tmp; | 152 | uint32_t tmp; |
| 153 | int r; | 153 | int r; |
| 154 | 154 | ||
| 155 | r = uvd_v5_0_start(adev); | 155 | amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); |
| 156 | if (r) | 156 | uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); |
| 157 | goto done; | 157 | uvd_v5_0_enable_mgcg(adev, true); |
| 158 | 158 | ||
| 159 | ring->ready = true; | 159 | ring->ready = true; |
| 160 | r = amdgpu_ring_test_ring(ring); | 160 | r = amdgpu_ring_test_ring(ring); |
| @@ -189,11 +189,13 @@ static int uvd_v5_0_hw_init(void *handle) | |||
| 189 | amdgpu_ring_write(ring, 3); | 189 | amdgpu_ring_write(ring, 3); |
| 190 | 190 | ||
| 191 | amdgpu_ring_commit(ring); | 191 | amdgpu_ring_commit(ring); |
| 192 | |||
| 192 | done: | 193 | done: |
| 193 | if (!r) | 194 | if (!r) |
| 194 | DRM_INFO("UVD initialized successfully.\n"); | 195 | DRM_INFO("UVD initialized successfully.\n"); |
| 195 | 196 | ||
| 196 | return r; | 197 | return r; |
| 198 | |||
| 197 | } | 199 | } |
| 198 | 200 | ||
| 199 | /** | 201 | /** |
| @@ -208,7 +210,9 @@ static int uvd_v5_0_hw_fini(void *handle) | |||
| 208 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 210 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 209 | struct amdgpu_ring *ring = &adev->uvd.ring; | 211 | struct amdgpu_ring *ring = &adev->uvd.ring; |
| 210 | 212 | ||
| 211 | uvd_v5_0_stop(adev); | 213 | if (RREG32(mmUVD_STATUS) != 0) |
| 214 | uvd_v5_0_stop(adev); | ||
| 215 | |||
| 212 | ring->ready = false; | 216 | ring->ready = false; |
| 213 | 217 | ||
| 214 | return 0; | 218 | return 0; |
| @@ -310,10 +314,6 @@ static int uvd_v5_0_start(struct amdgpu_device *adev) | |||
| 310 | 314 | ||
| 311 | uvd_v5_0_mc_resume(adev); | 315 | uvd_v5_0_mc_resume(adev); |
| 312 | 316 | ||
| 313 | amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); | ||
| 314 | uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); | ||
| 315 | uvd_v5_0_enable_mgcg(adev, true); | ||
| 316 | |||
| 317 | /* disable interrupt */ | 317 | /* disable interrupt */ |
| 318 | WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1)); | 318 | WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1)); |
| 319 | 319 | ||
| @@ -456,6 +456,8 @@ static void uvd_v5_0_stop(struct amdgpu_device *adev) | |||
| 456 | 456 | ||
| 457 | /* Unstall UMC and register bus */ | 457 | /* Unstall UMC and register bus */ |
| 458 | WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); | 458 | WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); |
| 459 | |||
| 460 | WREG32(mmUVD_STATUS, 0); | ||
| 459 | } | 461 | } |
| 460 | 462 | ||
| 461 | /** | 463 | /** |
| @@ -792,9 +794,6 @@ static int uvd_v5_0_set_clockgating_state(void *handle, | |||
| 792 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 794 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 793 | bool enable = (state == AMD_CG_STATE_GATE) ? true : false; | 795 | bool enable = (state == AMD_CG_STATE_GATE) ? true : false; |
| 794 | 796 | ||
| 795 | if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) | ||
| 796 | return 0; | ||
| 797 | |||
| 798 | if (enable) { | 797 | if (enable) { |
| 799 | /* wait for STATUS to clear */ | 798 | /* wait for STATUS to clear */ |
| 800 | if (uvd_v5_0_wait_for_idle(handle)) | 799 | if (uvd_v5_0_wait_for_idle(handle)) |
| @@ -824,17 +823,12 @@ static int uvd_v5_0_set_powergating_state(void *handle, | |||
| 824 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 823 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 825 | int ret = 0; | 824 | int ret = 0; |
| 826 | 825 | ||
| 827 | if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) | ||
| 828 | return 0; | ||
| 829 | |||
| 830 | if (state == AMD_PG_STATE_GATE) { | 826 | if (state == AMD_PG_STATE_GATE) { |
| 831 | uvd_v5_0_stop(adev); | 827 | uvd_v5_0_stop(adev); |
| 832 | adev->uvd.is_powergated = true; | ||
| 833 | } else { | 828 | } else { |
| 834 | ret = uvd_v5_0_start(adev); | 829 | ret = uvd_v5_0_start(adev); |
| 835 | if (ret) | 830 | if (ret) |
| 836 | goto out; | 831 | goto out; |
| 837 | adev->uvd.is_powergated = false; | ||
| 838 | } | 832 | } |
| 839 | 833 | ||
| 840 | out: | 834 | out: |
| @@ -848,7 +842,8 @@ static void uvd_v5_0_get_clockgating_state(void *handle, u32 *flags) | |||
| 848 | 842 | ||
| 849 | mutex_lock(&adev->pm.mutex); | 843 | mutex_lock(&adev->pm.mutex); |
| 850 | 844 | ||
| 851 | if (adev->uvd.is_powergated) { | 845 | if (RREG32_SMC(ixCURRENT_PG_STATUS) & |
| 846 | CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) { | ||
| 852 | DRM_INFO("Cannot get clockgating state when UVD is powergated.\n"); | 847 | DRM_INFO("Cannot get clockgating state when UVD is powergated.\n"); |
| 853 | goto out; | 848 | goto out; |
| 854 | } | 849 | } |
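The UVD v5 changes follow one theme: one-time clock and clockgating setup moves out of uvd_v5_0_start() into hw_init(), hw_fini() and the handlers stop trusting cached driver state, and get_clockgating_state() queries the SMC directly instead of the removed uvd.is_powergated flag. Condensed:

```c
/* hw_init() now owns the one-time clock/CG setup (condensed): */
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
uvd_v5_0_enable_mgcg(adev, true);
/* ... ring tests follow; uvd_v5_0_start() is left to the
 * powergating hook, so a later ungate does not re-program clocks */

/* hw_fini() guards the stop on live hardware state: */
if (RREG32(mmUVD_STATUS) != 0)
	uvd_v5_0_stop(adev);
```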
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index de7e03544d00..18a6de4e1512 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | |||
| @@ -155,9 +155,9 @@ static int uvd_v6_0_hw_init(void *handle) | |||
| 155 | uint32_t tmp; | 155 | uint32_t tmp; |
| 156 | int r; | 156 | int r; |
| 157 | 157 | ||
| 158 | r = uvd_v6_0_start(adev); | 158 | amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); |
| 159 | if (r) | 159 | uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); |
| 160 | goto done; | 160 | uvd_v6_0_enable_mgcg(adev, true); |
| 161 | 161 | ||
| 162 | ring->ready = true; | 162 | ring->ready = true; |
| 163 | r = amdgpu_ring_test_ring(ring); | 163 | r = amdgpu_ring_test_ring(ring); |
| @@ -212,7 +212,9 @@ static int uvd_v6_0_hw_fini(void *handle) | |||
| 212 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 212 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 213 | struct amdgpu_ring *ring = &adev->uvd.ring; | 213 | struct amdgpu_ring *ring = &adev->uvd.ring; |
| 214 | 214 | ||
| 215 | uvd_v6_0_stop(adev); | 215 | if (RREG32(mmUVD_STATUS) != 0) |
| 216 | uvd_v6_0_stop(adev); | ||
| 217 | |||
| 216 | ring->ready = false; | 218 | ring->ready = false; |
| 217 | 219 | ||
| 218 | return 0; | 220 | return 0; |
| @@ -397,9 +399,6 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) | |||
| 397 | lmi_swap_cntl = 0; | 399 | lmi_swap_cntl = 0; |
| 398 | mp_swap_cntl = 0; | 400 | mp_swap_cntl = 0; |
| 399 | 401 | ||
| 400 | amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); | ||
| 401 | uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); | ||
| 402 | uvd_v6_0_enable_mgcg(adev, true); | ||
| 403 | uvd_v6_0_mc_resume(adev); | 402 | uvd_v6_0_mc_resume(adev); |
| 404 | 403 | ||
| 405 | /* disable interrupt */ | 404 | /* disable interrupt */ |
| @@ -554,6 +553,8 @@ static void uvd_v6_0_stop(struct amdgpu_device *adev) | |||
| 554 | 553 | ||
| 555 | /* Unstall UMC and register bus */ | 554 | /* Unstall UMC and register bus */ |
| 556 | WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); | 555 | WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); |
| 556 | |||
| 557 | WREG32(mmUVD_STATUS, 0); | ||
| 557 | } | 558 | } |
| 558 | 559 | ||
| 559 | /** | 560 | /** |
| @@ -1018,9 +1019,6 @@ static int uvd_v6_0_set_clockgating_state(void *handle, | |||
| 1018 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1019 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 1019 | bool enable = (state == AMD_CG_STATE_GATE) ? true : false; | 1020 | bool enable = (state == AMD_CG_STATE_GATE) ? true : false; |
| 1020 | 1021 | ||
| 1021 | if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) | ||
| 1022 | return 0; | ||
| 1023 | |||
| 1024 | if (enable) { | 1022 | if (enable) { |
| 1025 | /* wait for STATUS to clear */ | 1023 | /* wait for STATUS to clear */ |
| 1026 | if (uvd_v6_0_wait_for_idle(handle)) | 1024 | if (uvd_v6_0_wait_for_idle(handle)) |
| @@ -1049,19 +1047,14 @@ static int uvd_v6_0_set_powergating_state(void *handle, | |||
| 1049 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1047 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 1050 | int ret = 0; | 1048 | int ret = 0; |
| 1051 | 1049 | ||
| 1052 | if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) | ||
| 1053 | return 0; | ||
| 1054 | |||
| 1055 | WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK); | 1050 | WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK); |
| 1056 | 1051 | ||
| 1057 | if (state == AMD_PG_STATE_GATE) { | 1052 | if (state == AMD_PG_STATE_GATE) { |
| 1058 | uvd_v6_0_stop(adev); | 1053 | uvd_v6_0_stop(adev); |
| 1059 | adev->uvd.is_powergated = true; | ||
| 1060 | } else { | 1054 | } else { |
| 1061 | ret = uvd_v6_0_start(adev); | 1055 | ret = uvd_v6_0_start(adev); |
| 1062 | if (ret) | 1056 | if (ret) |
| 1063 | goto out; | 1057 | goto out; |
| 1064 | adev->uvd.is_powergated = false; | ||
| 1065 | } | 1058 | } |
| 1066 | 1059 | ||
| 1067 | out: | 1060 | out: |
| @@ -1075,7 +1068,8 @@ static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags) | |||
| 1075 | 1068 | ||
| 1076 | mutex_lock(&adev->pm.mutex); | 1069 | mutex_lock(&adev->pm.mutex); |
| 1077 | 1070 | ||
| 1078 | if (adev->uvd.is_powergated) { | 1071 | if (RREG32_SMC(ixCURRENT_PG_STATUS) & |
| 1072 | CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) { | ||
| 1079 | DRM_INFO("Cannot get clockgating state when UVD is powergated.\n"); | 1073 | DRM_INFO("Cannot get clockgating state when UVD is powergated.\n"); |
| 1080 | goto out; | 1074 | goto out; |
| 1081 | } | 1075 | } |
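uvd_v6_0 gets the same treatment as v5; in addition, the early returns on missing CG/PG support flags are dropped, so the handlers always run and the core's flag checks decide. The live powergate query now guards the clockgating report:

```c
/* uvd_v6_0_get_clockgating_state() reads the live PG status from the
 * SMC instead of a driver-side bool (condensed from the hunk): */
mutex_lock(&adev->pm.mutex);
if (RREG32_SMC(ixCURRENT_PG_STATUS) &
    CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
	DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
	goto out;
}
/* ... report CG flags ... */
out:
mutex_unlock(&adev->pm.mutex);
```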
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index 38ed903dd6f8..9ea99348e493 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | |||
| @@ -42,10 +42,9 @@ | |||
| 42 | #define VCE_V2_0_DATA_SIZE (23552 * AMDGPU_MAX_VCE_HANDLES) | 42 | #define VCE_V2_0_DATA_SIZE (23552 * AMDGPU_MAX_VCE_HANDLES) |
| 43 | #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 | 43 | #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 |
| 44 | 44 | ||
| 45 | static void vce_v2_0_mc_resume(struct amdgpu_device *adev); | ||
| 46 | static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev); | 45 | static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev); |
| 47 | static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev); | 46 | static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev); |
| 48 | static int vce_v2_0_wait_for_idle(void *handle); | 47 | |
| 49 | /** | 48 | /** |
| 50 | * vce_v2_0_ring_get_rptr - get read pointer | 49 | * vce_v2_0_ring_get_rptr - get read pointer |
| 51 | * | 50 | * |
| @@ -140,6 +139,86 @@ static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev) | |||
| 140 | return -ETIMEDOUT; | 139 | return -ETIMEDOUT; |
| 141 | } | 140 | } |
| 142 | 141 | ||
| 142 | static void vce_v2_0_disable_cg(struct amdgpu_device *adev) | ||
| 143 | { | ||
| 144 | WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7); | ||
| 145 | } | ||
| 146 | |||
| 147 | static void vce_v2_0_init_cg(struct amdgpu_device *adev) | ||
| 148 | { | ||
| 149 | u32 tmp; | ||
| 150 | |||
| 151 | tmp = RREG32(mmVCE_CLOCK_GATING_A); | ||
| 152 | tmp &= ~0xfff; | ||
| 153 | tmp |= ((0 << 0) | (4 << 4)); | ||
| 154 | tmp |= 0x40000; | ||
| 155 | WREG32(mmVCE_CLOCK_GATING_A, tmp); | ||
| 156 | |||
| 157 | tmp = RREG32(mmVCE_UENC_CLOCK_GATING); | ||
| 158 | tmp &= ~0xfff; | ||
| 159 | tmp |= ((0 << 0) | (4 << 4)); | ||
| 160 | WREG32(mmVCE_UENC_CLOCK_GATING, tmp); | ||
| 161 | |||
| 162 | tmp = RREG32(mmVCE_CLOCK_GATING_B); | ||
| 163 | tmp |= 0x10; | ||
| 164 | tmp &= ~0x100000; | ||
| 165 | WREG32(mmVCE_CLOCK_GATING_B, tmp); | ||
| 166 | } | ||
| 167 | |||
| 168 | static void vce_v2_0_mc_resume(struct amdgpu_device *adev) | ||
| 169 | { | ||
| 170 | uint64_t addr = adev->vce.gpu_addr; | ||
| 171 | uint32_t size; | ||
| 172 | |||
| 173 | WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16)); | ||
| 174 | WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000); | ||
| 175 | WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F); | ||
| 176 | WREG32(mmVCE_CLOCK_GATING_B, 0xf7); | ||
| 177 | |||
| 178 | WREG32(mmVCE_LMI_CTRL, 0x00398000); | ||
| 179 | WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1); | ||
| 180 | WREG32(mmVCE_LMI_SWAP_CNTL, 0); | ||
| 181 | WREG32(mmVCE_LMI_SWAP_CNTL1, 0); | ||
| 182 | WREG32(mmVCE_LMI_VM_CTRL, 0); | ||
| 183 | |||
| 184 | addr += AMDGPU_VCE_FIRMWARE_OFFSET; | ||
| 185 | size = VCE_V2_0_FW_SIZE; | ||
| 186 | WREG32(mmVCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff); | ||
| 187 | WREG32(mmVCE_VCPU_CACHE_SIZE0, size); | ||
| 188 | |||
| 189 | addr += size; | ||
| 190 | size = VCE_V2_0_STACK_SIZE; | ||
| 191 | WREG32(mmVCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff); | ||
| 192 | WREG32(mmVCE_VCPU_CACHE_SIZE1, size); | ||
| 193 | |||
| 194 | addr += size; | ||
| 195 | size = VCE_V2_0_DATA_SIZE; | ||
| 196 | WREG32(mmVCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff); | ||
| 197 | WREG32(mmVCE_VCPU_CACHE_SIZE2, size); | ||
| 198 | |||
| 199 | WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100); | ||
| 200 | WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1); | ||
| 201 | } | ||
| 202 | |||
| 203 | static bool vce_v2_0_is_idle(void *handle) | ||
| 204 | { | ||
| 205 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 206 | |||
| 207 | return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK); | ||
| 208 | } | ||
| 209 | |||
| 210 | static int vce_v2_0_wait_for_idle(void *handle) | ||
| 211 | { | ||
| 212 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 213 | unsigned i; | ||
| 214 | |||
| 215 | for (i = 0; i < adev->usec_timeout; i++) { | ||
| 216 | if (vce_v2_0_is_idle(handle)) | ||
| 217 | return 0; | ||
| 218 | } | ||
| 219 | return -ETIMEDOUT; | ||
| 220 | } | ||
| 221 | |||
| 143 | /** | 222 | /** |
| 144 | * vce_v2_0_start - start VCE block | 223 | * vce_v2_0_start - start VCE block |
| 145 | * | 224 | * |
| @@ -152,11 +231,14 @@ static int vce_v2_0_start(struct amdgpu_device *adev) | |||
| 152 | struct amdgpu_ring *ring; | 231 | struct amdgpu_ring *ring; |
| 153 | int r; | 232 | int r; |
| 154 | 233 | ||
| 155 | vce_v2_0_mc_resume(adev); | ||
| 156 | |||
| 157 | /* set BUSY flag */ | 234 | /* set BUSY flag */ |
| 158 | WREG32_P(mmVCE_STATUS, 1, ~1); | 235 | WREG32_P(mmVCE_STATUS, 1, ~1); |
| 159 | 236 | ||
| 237 | vce_v2_0_init_cg(adev); | ||
| 238 | vce_v2_0_disable_cg(adev); | ||
| 239 | |||
| 240 | vce_v2_0_mc_resume(adev); | ||
| 241 | |||
| 160 | ring = &adev->vce.ring[0]; | 242 | ring = &adev->vce.ring[0]; |
| 161 | WREG32(mmVCE_RB_RPTR, ring->wptr); | 243 | WREG32(mmVCE_RB_RPTR, ring->wptr); |
| 162 | WREG32(mmVCE_RB_WPTR, ring->wptr); | 244 | WREG32(mmVCE_RB_WPTR, ring->wptr); |
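vce_v2_0_start() is rebuilt: the CG and MC helpers move above it in the file (hence the matching removal block further down), and the boot sequence now flags the block busy before clockgating is initialized and force-disabled, with mc_resume() last instead of first. Condensed prologue:

```c
/* New prologue of vce_v2_0_start(), condensed from the hunk: */
WREG32_P(mmVCE_STATUS, 1, ~1);	/* set BUSY before touching CG */

vce_v2_0_init_cg(adev);		/* program the CG defaults */
vce_v2_0_disable_cg(adev);	/* then override-disable while booting */

vce_v2_0_mc_resume(adev);	/* MC/firmware window setup last */
```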
| @@ -189,6 +271,145 @@ static int vce_v2_0_start(struct amdgpu_device *adev) | |||
| 189 | return 0; | 271 | return 0; |
| 190 | } | 272 | } |
| 191 | 273 | ||
| 274 | static int vce_v2_0_stop(struct amdgpu_device *adev) | ||
| 275 | { | ||
| 276 | int i, j; | ||
| 277 | int status; | ||
| 278 | |||
| 279 | if (vce_v2_0_lmi_clean(adev)) { | ||
| 280 | DRM_INFO("VCE is not idle\n"); | ||
| 281 | return 0; | ||
| 282 | } | ||
| 283 | /* | ||
| 284 | for (i = 0; i < 10; ++i) { | ||
| 285 | for (j = 0; j < 100; ++j) { | ||
| 286 | status = RREG32(mmVCE_FW_REG_STATUS); | ||
| 287 | if (!(status & 1)) | ||
| 288 | break; | ||
| 289 | mdelay(1); | ||
| 290 | } | ||
| 291 | break; | ||
| 292 | } | ||
| 293 | */ | ||
| 294 | if (vce_v2_0_wait_for_idle(adev)) { | ||
| 295 | DRM_INFO("VCE is busy, can't set clock gating\n"); | ||
| 296 | return 0; | ||
| 297 | } | ||
| 298 | |||
| 299 | /* Stall UMC and register bus before resetting VCPU */ | ||
| 300 | WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8)); | ||
| 301 | |||
| 302 | for (i = 0; i < 10; ++i) { | ||
| 303 | for (j = 0; j < 100; ++j) { | ||
| 304 | status = RREG32(mmVCE_LMI_STATUS); | ||
| 305 | if (status & 0x240) | ||
| 306 | break; | ||
| 307 | mdelay(1); | ||
| 308 | } | ||
| 309 | break; | ||
| 310 | } | ||
| 311 | |||
| 312 | WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x80001); | ||
| 313 | |||
| 314 | /* put LMI, VCPU, RBC etc... into reset */ | ||
| 315 | WREG32_P(mmVCE_SOFT_RESET, 1, ~0x1); | ||
| 316 | |||
| 317 | WREG32(mmVCE_STATUS, 0); | ||
| 318 | |||
| 319 | return 0; | ||
| 320 | } | ||
| 321 | |||
| 322 | static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated) | ||
| 323 | { | ||
| 324 | u32 tmp; | ||
| 325 | |||
| 326 | if (gated) { | ||
| 327 | tmp = RREG32(mmVCE_CLOCK_GATING_B); | ||
| 328 | tmp |= 0xe70000; | ||
| 329 | WREG32(mmVCE_CLOCK_GATING_B, tmp); | ||
| 330 | |||
| 331 | tmp = RREG32(mmVCE_UENC_CLOCK_GATING); | ||
| 332 | tmp |= 0xff000000; | ||
| 333 | WREG32(mmVCE_UENC_CLOCK_GATING, tmp); | ||
| 334 | |||
| 335 | tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); | ||
| 336 | tmp &= ~0x3fc; | ||
| 337 | WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); | ||
| 338 | |||
| 339 | WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0); | ||
| 340 | } else { | ||
| 341 | tmp = RREG32(mmVCE_CLOCK_GATING_B); | ||
| 342 | tmp |= 0xe7; | ||
| 343 | tmp &= ~0xe70000; | ||
| 344 | WREG32(mmVCE_CLOCK_GATING_B, tmp); | ||
| 345 | |||
| 346 | tmp = RREG32(mmVCE_UENC_CLOCK_GATING); | ||
| 347 | tmp |= 0x1fe000; | ||
| 348 | tmp &= ~0xff000000; | ||
| 349 | WREG32(mmVCE_UENC_CLOCK_GATING, tmp); | ||
| 350 | |||
| 351 | tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); | ||
| 352 | tmp |= 0x3fc; | ||
| 353 | WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); | ||
| 354 | } | ||
| 355 | } | ||
| 356 | |||
| 357 | static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated) | ||
| 358 | { | ||
| 359 | u32 orig, tmp; | ||
| 360 | |||
| 361 | /* LMI_MC/LMI_UMC always set in dynamic, | ||
| 362 | * set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0} | ||
| 363 | */ | ||
| 364 | tmp = RREG32(mmVCE_CLOCK_GATING_B); | ||
| 365 | tmp &= ~0x00060006; | ||
| 366 | |||
| 367 | /* Exception for ECPU, IH, SEM, SYS blocks needs to be turned on/off by SW */ | ||
| 368 | if (gated) { | ||
| 369 | tmp |= 0xe10000; | ||
| 370 | WREG32(mmVCE_CLOCK_GATING_B, tmp); | ||
| 371 | } else { | ||
| 372 | tmp |= 0xe1; | ||
| 373 | tmp &= ~0xe10000; | ||
| 374 | WREG32(mmVCE_CLOCK_GATING_B, tmp); | ||
| 375 | } | ||
| 376 | |||
| 377 | orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING); | ||
| 378 | tmp &= ~0x1fe000; | ||
| 379 | tmp &= ~0xff000000; | ||
| 380 | if (tmp != orig) | ||
| 381 | WREG32(mmVCE_UENC_CLOCK_GATING, tmp); | ||
| 382 | |||
| 383 | orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); | ||
| 384 | tmp &= ~0x3fc; | ||
| 385 | if (tmp != orig) | ||
| 386 | WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); | ||
| 387 | |||
| 388 | /* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */ | ||
| 389 | WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00); | ||
| 390 | |||
| 391 | if (gated) | ||
| 392 | WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0); | ||
| 393 | } | ||
| 394 | |||
| 395 | static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable, | ||
| 396 | bool sw_cg) | ||
| 397 | { | ||
| 398 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) { | ||
| 399 | if (sw_cg) | ||
| 400 | vce_v2_0_set_sw_cg(adev, true); | ||
| 401 | else | ||
| 402 | vce_v2_0_set_dyn_cg(adev, true); | ||
| 403 | } else { | ||
| 404 | vce_v2_0_disable_cg(adev); | ||
| 405 | |||
| 406 | if (sw_cg) | ||
| 407 | vce_v2_0_set_sw_cg(adev, false); | ||
| 408 | else | ||
| 409 | vce_v2_0_set_dyn_cg(adev, false); | ||
| 410 | } | ||
| 411 | } | ||
| 412 | |||
| 192 | static int vce_v2_0_early_init(void *handle) | 413 | static int vce_v2_0_early_init(void *handle) |
| 193 | { | 414 | { |
| 194 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 415 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| @@ -254,11 +475,8 @@ static int vce_v2_0_hw_init(void *handle) | |||
| 254 | int r, i; | 475 | int r, i; |
| 255 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 476 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 256 | 477 | ||
| 257 | r = vce_v2_0_start(adev); | 478 | amdgpu_asic_set_vce_clocks(adev, 10000, 10000); |
| 258 | /* this error mean vcpu not in running state, so just skip ring test, not stop driver initialize */ | 479 | vce_v2_0_enable_mgcg(adev, true, false); |
| 259 | if (r) | ||
| 260 | return 0; | ||
| 261 | |||
| 262 | for (i = 0; i < adev->vce.num_rings; i++) | 480 | for (i = 0; i < adev->vce.num_rings; i++) |
| 263 | adev->vce.ring[i].ready = false; | 481 | adev->vce.ring[i].ready = false; |
| 264 | 482 | ||
| @@ -312,190 +530,6 @@ static int vce_v2_0_resume(void *handle) | |||
| 312 | return r; | 530 | return r; |
| 313 | } | 531 | } |
| 314 | 532 | ||
| 315 | static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated) | ||
| 316 | { | ||
| 317 | u32 tmp; | ||
| 318 | |||
| 319 | if (gated) { | ||
| 320 | tmp = RREG32(mmVCE_CLOCK_GATING_B); | ||
| 321 | tmp |= 0xe70000; | ||
| 322 | WREG32(mmVCE_CLOCK_GATING_B, tmp); | ||
| 323 | |||
| 324 | tmp = RREG32(mmVCE_UENC_CLOCK_GATING); | ||
| 325 | tmp |= 0xff000000; | ||
| 326 | WREG32(mmVCE_UENC_CLOCK_GATING, tmp); | ||
| 327 | |||
| 328 | tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); | ||
| 329 | tmp &= ~0x3fc; | ||
| 330 | WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); | ||
| 331 | |||
| 332 | WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0); | ||
| 333 | } else { | ||
| 334 | tmp = RREG32(mmVCE_CLOCK_GATING_B); | ||
| 335 | tmp |= 0xe7; | ||
| 336 | tmp &= ~0xe70000; | ||
| 337 | WREG32(mmVCE_CLOCK_GATING_B, tmp); | ||
| 338 | |||
| 339 | tmp = RREG32(mmVCE_UENC_CLOCK_GATING); | ||
| 340 | tmp |= 0x1fe000; | ||
| 341 | tmp &= ~0xff000000; | ||
| 342 | WREG32(mmVCE_UENC_CLOCK_GATING, tmp); | ||
| 343 | |||
| 344 | tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); | ||
| 345 | tmp |= 0x3fc; | ||
| 346 | WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); | ||
| 347 | } | ||
| 348 | } | ||
| 349 | |||
| 350 | static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated) | ||
| 351 | { | ||
| 352 | if (vce_v2_0_wait_for_idle(adev)) { | ||
| 353 | DRM_INFO("VCE is busy, can't set clock gating"); | ||
| 354 | return; | ||
| 355 | } | ||
| 356 | |||
| 357 | WREG32_P(mmVCE_LMI_CTRL2, 0x100, ~0x100); | ||
| 358 | |||
| 359 | if (vce_v2_0_lmi_clean(adev)) { | ||
| 360 | DRM_INFO("LMI is busy, can't set clock gating"); | ||
| 361 | return; | ||
| 362 | } | ||
| 363 | |||
| 364 | WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK); | ||
| 365 | WREG32_P(mmVCE_SOFT_RESET, | ||
| 366 | VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, | ||
| 367 | ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); | ||
| 368 | WREG32(mmVCE_STATUS, 0); | ||
| 369 | |||
| 370 | if (gated) | ||
| 371 | WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0); | ||
| 372 | /* LMI_MC/LMI_UMC always set in dynamic, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0} */ | ||
| 373 | if (gated) { | ||
| 374 | /* Force CLOCK OFF , set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {*, 1} */ | ||
| 375 | WREG32(mmVCE_CLOCK_GATING_B, 0xe90010); | ||
| 376 | } else { | ||
| 377 | /* Force CLOCK ON, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {1, 0} */ | ||
| 378 | WREG32(mmVCE_CLOCK_GATING_B, 0x800f1); | ||
| 379 | } | ||
| 380 | |||
| 381 | /* Set VCE_UENC_CLOCK_GATING always in dynamic mode {*_FORCE_ON, *_FORCE_OFF} = {0, 0}*/; | ||
| 382 | WREG32(mmVCE_UENC_CLOCK_GATING, 0x40); | ||
| 383 | |||
| 384 | /* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */ | ||
| 385 | WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00); | ||
| 386 | |||
| 387 | WREG32_P(mmVCE_LMI_CTRL2, 0, ~0x100); | ||
| 388 | if(!gated) { | ||
| 389 | WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK); | ||
| 390 | mdelay(100); | ||
| 391 | WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); | ||
| 392 | |||
| 393 | vce_v2_0_firmware_loaded(adev); | ||
| 394 | WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK); | ||
| 395 | } | ||
| 396 | } | ||
| 397 | |||
| 398 | static void vce_v2_0_disable_cg(struct amdgpu_device *adev) | ||
| 399 | { | ||
| 400 | WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7); | ||
| 401 | } | ||
| 402 | |||
| 403 | static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable) | ||
| 404 | { | ||
| 405 | bool sw_cg = false; | ||
| 406 | |||
| 407 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) { | ||
| 408 | if (sw_cg) | ||
| 409 | vce_v2_0_set_sw_cg(adev, true); | ||
| 410 | else | ||
| 411 | vce_v2_0_set_dyn_cg(adev, true); | ||
| 412 | } else { | ||
| 413 | vce_v2_0_disable_cg(adev); | ||
| 414 | |||
| 415 | if (sw_cg) | ||
| 416 | vce_v2_0_set_sw_cg(adev, false); | ||
| 417 | else | ||
| 418 | vce_v2_0_set_dyn_cg(adev, false); | ||
| 419 | } | ||
| 420 | } | ||
| 421 | |||
| 422 | static void vce_v2_0_init_cg(struct amdgpu_device *adev) | ||
| 423 | { | ||
| 424 | u32 tmp; | ||
| 425 | |||
| 426 | tmp = RREG32(mmVCE_CLOCK_GATING_A); | ||
| 427 | tmp &= ~0xfff; | ||
| 428 | tmp |= ((0 << 0) | (4 << 4)); | ||
| 429 | tmp |= 0x40000; | ||
| 430 | WREG32(mmVCE_CLOCK_GATING_A, tmp); | ||
| 431 | |||
| 432 | tmp = RREG32(mmVCE_UENC_CLOCK_GATING); | ||
| 433 | tmp &= ~0xfff; | ||
| 434 | tmp |= ((0 << 0) | (4 << 4)); | ||
| 435 | WREG32(mmVCE_UENC_CLOCK_GATING, tmp); | ||
| 436 | |||
| 437 | tmp = RREG32(mmVCE_CLOCK_GATING_B); | ||
| 438 | tmp |= 0x10; | ||
| 439 | tmp &= ~0x100000; | ||
| 440 | WREG32(mmVCE_CLOCK_GATING_B, tmp); | ||
| 441 | } | ||
| 442 | |||
| 443 | static void vce_v2_0_mc_resume(struct amdgpu_device *adev) | ||
| 444 | { | ||
| 445 | uint64_t addr = adev->vce.gpu_addr; | ||
| 446 | uint32_t size; | ||
| 447 | |||
| 448 | WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16)); | ||
| 449 | WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000); | ||
| 450 | WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F); | ||
| 451 | WREG32(mmVCE_CLOCK_GATING_B, 0xf7); | ||
| 452 | |||
| 453 | WREG32(mmVCE_LMI_CTRL, 0x00398000); | ||
| 454 | WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1); | ||
| 455 | WREG32(mmVCE_LMI_SWAP_CNTL, 0); | ||
| 456 | WREG32(mmVCE_LMI_SWAP_CNTL1, 0); | ||
| 457 | WREG32(mmVCE_LMI_VM_CTRL, 0); | ||
| 458 | |||
| 459 | addr += AMDGPU_VCE_FIRMWARE_OFFSET; | ||
| 460 | size = VCE_V2_0_FW_SIZE; | ||
| 461 | WREG32(mmVCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff); | ||
| 462 | WREG32(mmVCE_VCPU_CACHE_SIZE0, size); | ||
| 463 | |||
| 464 | addr += size; | ||
| 465 | size = VCE_V2_0_STACK_SIZE; | ||
| 466 | WREG32(mmVCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff); | ||
| 467 | WREG32(mmVCE_VCPU_CACHE_SIZE1, size); | ||
| 468 | |||
| 469 | addr += size; | ||
| 470 | size = VCE_V2_0_DATA_SIZE; | ||
| 471 | WREG32(mmVCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff); | ||
| 472 | WREG32(mmVCE_VCPU_CACHE_SIZE2, size); | ||
| 473 | |||
| 474 | WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100); | ||
| 475 | WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1); | ||
| 476 | |||
| 477 | vce_v2_0_init_cg(adev); | ||
| 478 | } | ||
| 479 | |||
| 480 | static bool vce_v2_0_is_idle(void *handle) | ||
| 481 | { | ||
| 482 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 483 | |||
| 484 | return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK); | ||
| 485 | } | ||
| 486 | |||
| 487 | static int vce_v2_0_wait_for_idle(void *handle) | ||
| 488 | { | ||
| 489 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 490 | unsigned i; | ||
| 491 | |||
| 492 | for (i = 0; i < adev->usec_timeout; i++) { | ||
| 493 | if (vce_v2_0_is_idle(handle)) | ||
| 494 | return 0; | ||
| 495 | } | ||
| 496 | return -ETIMEDOUT; | ||
| 497 | } | ||
| 498 | |||
| 499 | static int vce_v2_0_soft_reset(void *handle) | 533 | static int vce_v2_0_soft_reset(void *handle) |
| 500 | { | 534 | { |
| 501 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 535 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| @@ -539,33 +573,20 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev, | |||
| 539 | return 0; | 573 | return 0; |
| 540 | } | 574 | } |
| 541 | 575 | ||
| 542 | static void vce_v2_0_set_bypass_mode(struct amdgpu_device *adev, bool enable) | ||
| 543 | { | ||
| 544 | u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL); | ||
| 545 | |||
| 546 | if (enable) | ||
| 547 | tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK; | ||
| 548 | else | ||
| 549 | tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK; | ||
| 550 | |||
| 551 | WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp); | ||
| 552 | } | ||
| 553 | |||
| 554 | |||
| 555 | static int vce_v2_0_set_clockgating_state(void *handle, | 576 | static int vce_v2_0_set_clockgating_state(void *handle, |
| 556 | enum amd_clockgating_state state) | 577 | enum amd_clockgating_state state) |
| 557 | { | 578 | { |
| 558 | bool gate = false; | 579 | bool gate = false; |
| 559 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 580 | bool sw_cg = false; |
| 560 | bool enable = (state == AMD_CG_STATE_GATE) ? true : false; | ||
| 561 | |||
| 562 | 581 | ||
| 563 | vce_v2_0_set_bypass_mode(adev, enable); | 582 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 564 | 583 | ||
| 565 | if (state == AMD_CG_STATE_GATE) | 584 | if (state == AMD_CG_STATE_GATE) { |
| 566 | gate = true; | 585 | gate = true; |
| 586 | sw_cg = true; | ||
| 587 | } | ||
| 567 | 588 | ||
| 568 | vce_v2_0_enable_mgcg(adev, gate); | 589 | vce_v2_0_enable_mgcg(adev, gate, sw_cg); |
| 569 | 590 | ||
| 570 | return 0; | 591 | return 0; |
| 571 | } | 592 | } |
| @@ -582,12 +603,8 @@ static int vce_v2_0_set_powergating_state(void *handle, | |||
| 582 | */ | 603 | */ |
| 583 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 604 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 584 | 605 | ||
| 585 | if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE)) | ||
| 586 | return 0; | ||
| 587 | |||
| 588 | if (state == AMD_PG_STATE_GATE) | 606 | if (state == AMD_PG_STATE_GATE) |
| 589 | /* XXX do we need a vce_v2_0_stop()? */ | 607 | return vce_v2_0_stop(adev); |
| 590 | return 0; | ||
| 591 | else | 608 | else |
| 592 | return vce_v2_0_start(adev); | 609 | return vce_v2_0_start(adev); |
| 593 | } | 610 | } |
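The handlers at the bottom tie the VCE v2 rework together: the GCK_DFS_BYPASS_CNTL toggle is dropped, set_clockgating_state() selects software CG explicitly when gating, and set_powergating_state() finally gets a real stop path in place of the old XXX comment. Condensed:

```c
/* vce_v2_0_set_clockgating_state(), condensed from the hunk: */
bool gate = false, sw_cg = false;

if (state == AMD_CG_STATE_GATE) {
	gate = true;
	sw_cg = true;
}
vce_v2_0_enable_mgcg(adev, gate, sw_cg);

/* vce_v2_0_set_powergating_state(): GATE now waits for idle, stalls
 * the UMC, and resets the block via the new vce_v2_0_stop(): */
if (state == AMD_PG_STATE_GATE)
	return vce_v2_0_stop(adev);
else
	return vce_v2_0_start(adev);
```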
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 8db26559fd1b..93ec8815bb13 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | |||
| @@ -230,10 +230,6 @@ static int vce_v3_0_start(struct amdgpu_device *adev) | |||
| 230 | struct amdgpu_ring *ring; | 230 | struct amdgpu_ring *ring; |
| 231 | int idx, r; | 231 | int idx, r; |
| 232 | 232 | ||
| 233 | vce_v3_0_override_vce_clock_gating(adev, true); | ||
| 234 | if (!(adev->flags & AMD_IS_APU)) | ||
| 235 | amdgpu_asic_set_vce_clocks(adev, 10000, 10000); | ||
| 236 | |||
| 237 | ring = &adev->vce.ring[0]; | 233 | ring = &adev->vce.ring[0]; |
| 238 | WREG32(mmVCE_RB_RPTR, ring->wptr); | 234 | WREG32(mmVCE_RB_RPTR, ring->wptr); |
| 239 | WREG32(mmVCE_RB_WPTR, ring->wptr); | 235 | WREG32(mmVCE_RB_WPTR, ring->wptr); |
| @@ -436,9 +432,9 @@ static int vce_v3_0_hw_init(void *handle) | |||
| 436 | int r, i; | 432 | int r, i; |
| 437 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 433 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 438 | 434 | ||
| 439 | r = vce_v3_0_start(adev); | 435 | vce_v3_0_override_vce_clock_gating(adev, true); |
| 440 | if (r) | 436 | if (!(adev->flags & AMD_IS_APU)) |
| 441 | return r; | 437 | amdgpu_asic_set_vce_clocks(adev, 10000, 10000); |
| 442 | 438 | ||
| 443 | for (i = 0; i < adev->vce.num_rings; i++) | 439 | for (i = 0; i < adev->vce.num_rings; i++) |
| 444 | adev->vce.ring[i].ready = false; | 440 | adev->vce.ring[i].ready = false; |
| @@ -514,6 +510,8 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx) | |||
| 514 | WREG32(mmVCE_LMI_SWAP_CNTL, 0); | 510 | WREG32(mmVCE_LMI_SWAP_CNTL, 0); |
| 515 | WREG32(mmVCE_LMI_SWAP_CNTL1, 0); | 511 | WREG32(mmVCE_LMI_SWAP_CNTL1, 0); |
| 516 | WREG32(mmVCE_LMI_VM_CTRL, 0); | 512 | WREG32(mmVCE_LMI_VM_CTRL, 0); |
| 513 | WREG32_OR(mmVCE_VCPU_CNTL, 0x00100000); | ||
| 514 | |||
| 517 | if (adev->asic_type >= CHIP_STONEY) { | 515 | if (adev->asic_type >= CHIP_STONEY) { |
| 518 | WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8)); | 516 | WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8)); |
| 519 | WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8)); | 517 | WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8)); |
| @@ -766,17 +764,14 @@ static int vce_v3_0_set_powergating_state(void *handle, | |||
| 766 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 764 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 767 | int ret = 0; | 765 | int ret = 0; |
| 768 | 766 | ||
| 769 | if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE)) | ||
| 770 | return 0; | ||
| 771 | |||
| 772 | if (state == AMD_PG_STATE_GATE) { | 767 | if (state == AMD_PG_STATE_GATE) { |
| 773 | adev->vce.is_powergated = true; | 768 | ret = vce_v3_0_stop(adev); |
| 774 | /* XXX do we need a vce_v3_0_stop()? */ | 769 | if (ret) |
| 770 | goto out; | ||
| 775 | } else { | 771 | } else { |
| 776 | ret = vce_v3_0_start(adev); | 772 | ret = vce_v3_0_start(adev); |
| 777 | if (ret) | 773 | if (ret) |
| 778 | goto out; | 774 | goto out; |
| 779 | adev->vce.is_powergated = false; | ||
| 780 | } | 775 | } |
| 781 | 776 | ||
| 782 | out: | 777 | out: |
| @@ -790,7 +785,8 @@ static void vce_v3_0_get_clockgating_state(void *handle, u32 *flags) | |||
| 790 | 785 | ||
| 791 | mutex_lock(&adev->pm.mutex); | 786 | mutex_lock(&adev->pm.mutex); |
| 792 | 787 | ||
| 793 | if (adev->vce.is_powergated) { | 788 | if (RREG32_SMC(ixCURRENT_PG_STATUS) & |
| 789 | CURRENT_PG_STATUS__VCE_PG_STATUS_MASK) { | ||
| 794 | DRM_INFO("Cannot get clockgating state when VCE is powergated.\n"); | 790 | DRM_INFO("Cannot get clockgating state when VCE is powergated.\n"); |
| 795 | goto out; | 791 | goto out; |
| 796 | } | 792 | } |
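Instead of trusting a cached is_powergated flag, the driver now asks the SMC directly, so the answer cannot go stale across resets. A hedged sketch of the query, using the ixCURRENT_PG_STATUS register and VCE mask this patch adds to the SMU headers below; the helper name is illustrative:

```c
/* True when the SMC currently reports VCE as power gated. */
static bool vce_v3_0_is_powergated(struct amdgpu_device *adev)
{
	return RREG32_SMC(ixCURRENT_PG_STATUS) &
	       CURRENT_PG_STATUS__VCE_PG_STATUS_MASK;
}
```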
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 4922fff08c3c..50bdb24ef8d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c | |||
| @@ -721,6 +721,7 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev) | |||
| 721 | if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) { | 721 | if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) { |
| 722 | /* enable BM */ | 722 | /* enable BM */ |
| 723 | pci_set_master(adev->pdev); | 723 | pci_set_master(adev->pdev); |
| 724 | adev->has_hw_reset = true; | ||
| 724 | return 0; | 725 | return 0; |
| 725 | } | 726 | } |
| 726 | udelay(1); | 727 | udelay(1); |
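vi_gpu_pci_config_reset() now records a completed reset in adev->has_hw_reset. A sketch of how a consumer might use the flag; the function name is hypothetical, the one-shot check-and-clear pattern is the point:

```c
/* Hypothetical consumer: report (and consume) the fact that a PCI
 * config reset completed, so later init code can decide whether a
 * full repost is needed. */
static bool amdgpu_consume_hw_reset(struct amdgpu_device *adev)
{
	bool reset = adev->has_hw_reset;

	adev->has_hw_reset = false;	/* one-shot: clear after reading */
	return reset;
}
```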
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h index f9fd2ea4625b..dbc2e723f659 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h | |||
| @@ -1310,5 +1310,6 @@ | |||
| 1310 | #define ixROM_SW_DATA_62 0xc060012c | 1310 | #define ixROM_SW_DATA_62 0xc060012c |
| 1311 | #define ixROM_SW_DATA_63 0xc0600130 | 1311 | #define ixROM_SW_DATA_63 0xc0600130 |
| 1312 | #define ixROM_SW_DATA_64 0xc0600134 | 1312 | #define ixROM_SW_DATA_64 0xc0600134 |
| 1313 | #define ixCURRENT_PG_STATUS 0xc020029c | ||
| 1313 | 1314 | ||
| 1314 | #endif /* SMU_7_0_1_D_H */ | 1315 | #endif /* SMU_7_0_1_D_H */ |
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h index 25882a4dea5d..34c6ff52710e 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h | |||
| @@ -5452,5 +5452,7 @@ | |||
| 5452 | #define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0 | 5452 | #define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0 |
| 5453 | #define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xffffffff | 5453 | #define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xffffffff |
| 5454 | #define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0 | 5454 | #define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0 |
| 5455 | #define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002 | ||
| 5456 | #define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004 | ||
| 5455 | 5457 | ||
| 5456 | #endif /* SMU_7_0_1_SH_MASK_H */ | 5458 | #endif /* SMU_7_0_1_SH_MASK_H */ |
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h index a9ef1562f43b..66597c64f525 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h | |||
| @@ -1121,5 +1121,6 @@ | |||
| 1121 | #define ixROM_SW_DATA_62 0xc060011c | 1121 | #define ixROM_SW_DATA_62 0xc060011c |
| 1122 | #define ixROM_SW_DATA_63 0xc0600120 | 1122 | #define ixROM_SW_DATA_63 0xc0600120 |
| 1123 | #define ixROM_SW_DATA_64 0xc0600124 | 1123 | #define ixROM_SW_DATA_64 0xc0600124 |
| 1124 | #define ixCURRENT_PG_STATUS 0xc020029c | ||
| 1124 | 1125 | ||
| 1125 | #endif /* SMU_7_1_1_D_H */ | 1126 | #endif /* SMU_7_1_1_D_H */ |
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_sh_mask.h index 2c997f7b5d13..fb06f2e2f6e6 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_sh_mask.h | |||
| @@ -4860,5 +4860,7 @@ | |||
| 4860 | #define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0 | 4860 | #define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0 |
| 4861 | #define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xffffffff | 4861 | #define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xffffffff |
| 4862 | #define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0 | 4862 | #define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0 |
| 4863 | #define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002 | ||
| 4864 | #define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004 | ||
| 4863 | 4865 | ||
| 4864 | #endif /* SMU_7_1_1_SH_MASK_H */ | 4866 | #endif /* SMU_7_1_1_SH_MASK_H */ |
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h index 22dd4c2b7290..4446d43d2a8f 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h | |||
| @@ -1271,5 +1271,6 @@ | |||
| 1271 | #define ixROM_SW_DATA_62 0xc060011c | 1271 | #define ixROM_SW_DATA_62 0xc060011c |
| 1272 | #define ixROM_SW_DATA_63 0xc0600120 | 1272 | #define ixROM_SW_DATA_63 0xc0600120 |
| 1273 | #define ixROM_SW_DATA_64 0xc0600124 | 1273 | #define ixROM_SW_DATA_64 0xc0600124 |
| 1274 | #define ixCURRENT_PG_STATUS 0xc020029c | ||
| 1274 | 1275 | ||
| 1275 | #endif /* SMU_7_1_2_D_H */ | 1276 | #endif /* SMU_7_1_2_D_H */ |
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h index 518fd02e9d35..627906674fe8 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h | |||
| @@ -5830,5 +5830,7 @@ | |||
| 5830 | #define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0 | 5830 | #define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0 |
| 5831 | #define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xffffffff | 5831 | #define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xffffffff |
| 5832 | #define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0 | 5832 | #define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0 |
| 5833 | #define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002 | ||
| 5834 | #define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004 | ||
| 5833 | 5835 | ||
| 5834 | #endif /* SMU_7_1_2_SH_MASK_H */ | 5836 | #endif /* SMU_7_1_2_SH_MASK_H */ |
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h index eca2b851f25f..0333d880bc9e 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h | |||
| @@ -1244,5 +1244,5 @@ | |||
| 1244 | #define ixGC_CAC_ACC_CU14 0xc8 | 1244 | #define ixGC_CAC_ACC_CU14 0xc8 |
| 1245 | #define ixGC_CAC_ACC_CU15 0xc9 | 1245 | #define ixGC_CAC_ACC_CU15 0xc9 |
| 1246 | #define ixGC_CAC_OVRD_CU 0xe7 | 1246 | #define ixGC_CAC_OVRD_CU 0xe7 |
| 1247 | | 1247 | #define ixCURRENT_PG_STATUS 0xc020029c |
| 1248 | #endif /* SMU_7_1_3_D_H */ | 1248 | #endif /* SMU_7_1_3_D_H */ |
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h index 1ede9e274714..654c1093d362 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h | |||
| @@ -6076,5 +6076,8 @@ | |||
| 6076 | #define GC_CAC_OVRD_CU__OVRRD_SELECT__SHIFT 0x0 | 6076 | #define GC_CAC_OVRD_CU__OVRRD_SELECT__SHIFT 0x0 |
| 6077 | #define GC_CAC_OVRD_CU__OVRRD_VALUE_MASK 0xffff0000 | 6077 | #define GC_CAC_OVRD_CU__OVRRD_VALUE_MASK 0xffff0000 |
| 6078 | #define GC_CAC_OVRD_CU__OVRRD_VALUE__SHIFT 0x10 | 6078 | #define GC_CAC_OVRD_CU__OVRRD_VALUE__SHIFT 0x10 |
| 6079 | #define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002 | ||
| 6080 | #define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004 | ||
| 6081 | |||
| 6079 | 6082 | ||
| 6080 | #endif /* SMU_7_1_3_SH_MASK_H */ | 6083 | #endif /* SMU_7_1_3_SH_MASK_H */ |
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index 1d26ae768147..17b9d41f3e87 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h | |||
| @@ -171,6 +171,7 @@ struct cgs_firmware_info { | |||
| 171 | uint32_t ucode_start_address; | 171 | uint32_t ucode_start_address; |
| 172 | 172 | ||
| 173 | void *kptr; | 173 | void *kptr; |
| 174 | bool is_kicker; | ||
| 174 | }; | 175 | }; |
| 175 | 176 | ||
| 176 | struct cgs_mode_info { | 177 | struct cgs_mode_info { |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c index 3eccac735db3..b33935fcf428 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c | |||
| @@ -161,28 +161,25 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) | |||
| 161 | { | 161 | { |
| 162 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); | 162 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); |
| 163 | 163 | ||
| 164 | if (cz_hwmgr->uvd_power_gated == bgate) | ||
| 165 | return 0; | ||
| 166 | |||
| 167 | cz_hwmgr->uvd_power_gated = bgate; | 164 | cz_hwmgr->uvd_power_gated = bgate; |
| 168 | 165 | ||
| 169 | if (bgate) { | 166 | if (bgate) { |
| 170 | cgs_set_clockgating_state(hwmgr->device, | ||
| 171 | AMD_IP_BLOCK_TYPE_UVD, | ||
| 172 | AMD_CG_STATE_GATE); | ||
| 173 | cgs_set_powergating_state(hwmgr->device, | 167 | cgs_set_powergating_state(hwmgr->device, |
| 174 | AMD_IP_BLOCK_TYPE_UVD, | 168 | AMD_IP_BLOCK_TYPE_UVD, |
| 175 | AMD_PG_STATE_GATE); | 169 | AMD_PG_STATE_GATE); |
| 170 | cgs_set_clockgating_state(hwmgr->device, | ||
| 171 | AMD_IP_BLOCK_TYPE_UVD, | ||
| 172 | AMD_CG_STATE_GATE); | ||
| 176 | cz_dpm_update_uvd_dpm(hwmgr, true); | 173 | cz_dpm_update_uvd_dpm(hwmgr, true); |
| 177 | cz_dpm_powerdown_uvd(hwmgr); | 174 | cz_dpm_powerdown_uvd(hwmgr); |
| 178 | } else { | 175 | } else { |
| 179 | cz_dpm_powerup_uvd(hwmgr); | 176 | cz_dpm_powerup_uvd(hwmgr); |
| 180 | cgs_set_powergating_state(hwmgr->device, | ||
| 181 | AMD_IP_BLOCK_TYPE_UVD, | ||
| 182 | AMD_CG_STATE_UNGATE); | ||
| 183 | cgs_set_clockgating_state(hwmgr->device, | 177 | cgs_set_clockgating_state(hwmgr->device, |
| 184 | AMD_IP_BLOCK_TYPE_UVD, | 178 | AMD_IP_BLOCK_TYPE_UVD, |
| 185 | AMD_PG_STATE_UNGATE); | 179 | AMD_PG_STATE_UNGATE); |
| 180 | cgs_set_powergating_state(hwmgr->device, | ||
| 181 | AMD_IP_BLOCK_TYPE_UVD, | ||
| 182 | AMD_CG_STATE_UNGATE); | ||
| 186 | cz_dpm_update_uvd_dpm(hwmgr, false); | 183 | cz_dpm_update_uvd_dpm(hwmgr, false); |
| 187 | } | 184 | } |
| 188 | 185 | ||
| @@ -193,47 +190,34 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) | |||
| 193 | { | 190 | { |
| 194 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); | 191 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); |
| 195 | 192 | ||
| 196 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 193 | if (bgate) { |
| 197 | PHM_PlatformCaps_VCEPowerGating)) { | 194 | cgs_set_powergating_state( |
| 198 | if (cz_hwmgr->vce_power_gated != bgate) { | 195 | hwmgr->device, |
| 199 | if (bgate) { | 196 | AMD_IP_BLOCK_TYPE_VCE, |
| 200 | cgs_set_clockgating_state( | 197 | AMD_PG_STATE_GATE); |
| 201 | hwmgr->device, | 198 | cgs_set_clockgating_state( |
| 202 | AMD_IP_BLOCK_TYPE_VCE, | 199 | hwmgr->device, |
| 203 | AMD_CG_STATE_GATE); | 200 | AMD_IP_BLOCK_TYPE_VCE, |
| 204 | cgs_set_powergating_state( | 201 | AMD_CG_STATE_GATE); |
| 205 | hwmgr->device, | 202 | cz_enable_disable_vce_dpm(hwmgr, false); |
| 206 | AMD_IP_BLOCK_TYPE_VCE, | 203 | cz_dpm_powerdown_vce(hwmgr); |
| 207 | AMD_PG_STATE_GATE); | 204 | cz_hwmgr->vce_power_gated = true; |
| 208 | cz_enable_disable_vce_dpm(hwmgr, false); | ||
| 209 | cz_dpm_powerdown_vce(hwmgr); | ||
| 210 | cz_hwmgr->vce_power_gated = true; | ||
| 211 | } else { | ||
| 212 | cz_dpm_powerup_vce(hwmgr); | ||
| 213 | cz_hwmgr->vce_power_gated = false; | ||
| 214 | cgs_set_powergating_state( | ||
| 215 | hwmgr->device, | ||
| 216 | AMD_IP_BLOCK_TYPE_VCE, | ||
| 217 | AMD_CG_STATE_UNGATE); | ||
| 218 | cgs_set_clockgating_state( | ||
| 219 | hwmgr->device, | ||
| 220 | AMD_IP_BLOCK_TYPE_VCE, | ||
| 221 | AMD_PG_STATE_UNGATE); | ||
| 222 | cz_dpm_update_vce_dpm(hwmgr); | ||
| 223 | cz_enable_disable_vce_dpm(hwmgr, true); | ||
| 224 | return 0; | ||
| 225 | } | ||
| 226 | } | ||
| 227 | } else { | 205 | } else { |
| 228 | cz_hwmgr->vce_power_gated = bgate; | 206 | cz_dpm_powerup_vce(hwmgr); |
| 207 | cz_hwmgr->vce_power_gated = false; | ||
| 208 | cgs_set_clockgating_state( | ||
| 209 | hwmgr->device, | ||
| 210 | AMD_IP_BLOCK_TYPE_VCE, | ||
| 211 | AMD_PG_STATE_UNGATE); | ||
| 212 | cgs_set_powergating_state( | ||
| 213 | hwmgr->device, | ||
| 214 | AMD_IP_BLOCK_TYPE_VCE, | ||
| 215 | AMD_CG_STATE_UNGATE); | ||
| 229 | cz_dpm_update_vce_dpm(hwmgr); | 216 | cz_dpm_update_vce_dpm(hwmgr); |
| 230 | cz_enable_disable_vce_dpm(hwmgr, !bgate); | 217 | cz_enable_disable_vce_dpm(hwmgr, true); |
| 231 | return 0; | 218 | return 0; |
| 232 | } | 219 | } |
| 233 | 220 | ||
| 234 | if (!cz_hwmgr->vce_power_gated) | ||
| 235 | cz_dpm_update_vce_dpm(hwmgr); | ||
| 236 | |||
| 237 | return 0; | 221 | return 0; |
| 238 | } | 222 | } |
| 239 | 223 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c index 4b0a94cc995e..953e0c9ad7cd 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | |||
| @@ -1396,3 +1396,25 @@ int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, | |||
| 1396 | 1396 | ||
| 1397 | return 0; | 1397 | return 0; |
| 1398 | } | 1398 | } |
| 1399 | |||
| 1400 | int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type, | ||
| 1401 | uint8_t *svd_gpio_id, uint8_t *svc_gpio_id, | ||
| 1402 | uint16_t *load_line) | ||
| 1403 | { | ||
| 1404 | ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info = | ||
| 1405 | (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->device); | ||
| 1406 | |||
| 1407 | const ATOM_VOLTAGE_OBJECT_V3 *voltage_object; | ||
| 1408 | |||
| 1409 | PP_ASSERT_WITH_CODE((NULL != voltage_info), | ||
| 1410 | "Could not find Voltage Table in BIOS.", return -EINVAL); | ||
| 1411 | |||
| 1412 | voltage_object = atomctrl_lookup_voltage_type_v3 | ||
| 1413 | (voltage_info, voltage_type, VOLTAGE_OBJ_SVID2); | ||
| 1414 | |||
| 1415 | *svd_gpio_id = voltage_object->asSVID2Obj.ucSVDGpioId; | ||
| 1416 | *svc_gpio_id = voltage_object->asSVID2Obj.ucSVCGpioId; | ||
| 1417 | *load_line = voltage_object->asSVID2Obj.usLoadLine_PSI; | ||
| 1418 | |||
| 1419 | return 0; | ||
| 1420 | } | ||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h index fc898afce002..e9fe2e84006b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h | |||
| @@ -311,5 +311,8 @@ extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_a | |||
| 311 | 311 | ||
| 312 | extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param); | 312 | extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param); |
| 313 | 313 | ||
| 314 | extern int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type, | ||
| 315 | uint8_t *svd_gpio_id, uint8_t *svc_gpio_id, | ||
| 316 | uint16_t *load_line); | ||
| 314 | #endif | 317 | #endif |
| 315 | 318 | ||
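atomctrl_get_svi2_info() pulls the SVI2 regulator routing (the SVD/SVC GPIO ids and the load-line word) out of the BIOS voltage table; it returns 0 on success and -EINVAL when the table is missing. A hypothetical caller, assuming only the helper declared above:

```c
/* Hypothetical debug helper: dump the SVI2 routing for VDDC. */
static void dump_svi2_routing(struct pp_hwmgr *hwmgr)
{
	uint8_t svd_gpio, svc_gpio;
	uint16_t load_line;

	if (!atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC,
				    &svd_gpio, &svc_gpio, &load_line))
		pr_debug("SVI2: SVD gpio %u, SVC gpio %u, load line 0x%04x\n",
			 svd_gpio, svc_gpio, load_line);
}
```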
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c index a1fc4fcac1e0..8cf71f3c6d0e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c | |||
| @@ -147,22 +147,22 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) | |||
| 147 | data->uvd_power_gated = bgate; | 147 | data->uvd_power_gated = bgate; |
| 148 | 148 | ||
| 149 | if (bgate) { | 149 | if (bgate) { |
| 150 | cgs_set_clockgating_state(hwmgr->device, | ||
| 151 | AMD_IP_BLOCK_TYPE_UVD, | ||
| 152 | AMD_CG_STATE_GATE); | ||
| 153 | cgs_set_powergating_state(hwmgr->device, | 150 | cgs_set_powergating_state(hwmgr->device, |
| 154 | AMD_IP_BLOCK_TYPE_UVD, | 151 | AMD_IP_BLOCK_TYPE_UVD, |
| 155 | AMD_PG_STATE_GATE); | 152 | AMD_PG_STATE_GATE); |
| 153 | cgs_set_clockgating_state(hwmgr->device, | ||
| 154 | AMD_IP_BLOCK_TYPE_UVD, | ||
| 155 | AMD_CG_STATE_GATE); | ||
| 156 | smu7_update_uvd_dpm(hwmgr, true); | 156 | smu7_update_uvd_dpm(hwmgr, true); |
| 157 | smu7_powerdown_uvd(hwmgr); | 157 | smu7_powerdown_uvd(hwmgr); |
| 158 | } else { | 158 | } else { |
| 159 | smu7_powerup_uvd(hwmgr); | 159 | smu7_powerup_uvd(hwmgr); |
| 160 | cgs_set_powergating_state(hwmgr->device, | ||
| 161 | AMD_IP_BLOCK_TYPE_UVD, | ||
| 162 | AMD_CG_STATE_UNGATE); | ||
| 163 | cgs_set_clockgating_state(hwmgr->device, | 160 | cgs_set_clockgating_state(hwmgr->device, |
| 164 | AMD_IP_BLOCK_TYPE_UVD, | 161 | AMD_IP_BLOCK_TYPE_UVD, |
| 165 | AMD_CG_STATE_UNGATE); | 162 | AMD_CG_STATE_UNGATE); |
| 163 | cgs_set_powergating_state(hwmgr->device, | ||
| 164 | AMD_IP_BLOCK_TYPE_UVD, | ||
| 165 | AMD_CG_STATE_UNGATE); | ||
| 166 | smu7_update_uvd_dpm(hwmgr, false); | 166 | smu7_update_uvd_dpm(hwmgr, false); |
| 167 | } | 167 | } |
| 168 | 168 | ||
| @@ -173,12 +173,12 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) | |||
| 173 | { | 173 | { |
| 174 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 174 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 175 | 175 | ||
| 176 | if (data->vce_power_gated == bgate) | ||
| 177 | return 0; | ||
| 178 | |||
| 179 | data->vce_power_gated = bgate; | 176 | data->vce_power_gated = bgate; |
| 180 | 177 | ||
| 181 | if (bgate) { | 178 | if (bgate) { |
| 179 | cgs_set_powergating_state(hwmgr->device, | ||
| 180 | AMD_IP_BLOCK_TYPE_VCE, | ||
| 181 | AMD_PG_STATE_UNGATE); | ||
| 182 | cgs_set_clockgating_state(hwmgr->device, | 182 | cgs_set_clockgating_state(hwmgr->device, |
| 183 | AMD_IP_BLOCK_TYPE_VCE, | 183 | AMD_IP_BLOCK_TYPE_VCE, |
| 184 | AMD_CG_STATE_GATE); | 184 | AMD_CG_STATE_GATE); |
| @@ -186,10 +186,13 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) | |||
| 186 | smu7_powerdown_vce(hwmgr); | 186 | smu7_powerdown_vce(hwmgr); |
| 187 | } else { | 187 | } else { |
| 188 | smu7_powerup_vce(hwmgr); | 188 | smu7_powerup_vce(hwmgr); |
| 189 | smu7_update_vce_dpm(hwmgr, false); | ||
| 190 | cgs_set_clockgating_state(hwmgr->device, | 189 | cgs_set_clockgating_state(hwmgr->device, |
| 191 | AMD_IP_BLOCK_TYPE_VCE, | 190 | AMD_IP_BLOCK_TYPE_VCE, |
| 192 | AMD_CG_STATE_UNGATE); | 191 | AMD_CG_STATE_UNGATE); |
| 192 | cgs_set_powergating_state(hwmgr->device, | ||
| 193 | AMD_IP_BLOCK_TYPE_VCE, | ||
| 194 | AMD_PG_STATE_UNGATE); | ||
| 195 | smu7_update_vce_dpm(hwmgr, false); | ||
| 193 | } | 196 | } |
| 194 | return 0; | 197 | return 0; |
| 195 | } | 198 | } |
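Both the cz and smu7 hunks above settle on the same sequencing: when gating, flip the powergating state first and the clockgating state second; when ungating, reverse the order so clocks run again before power is formally ungated. A distilled sketch of that ordering (note the diff itself passes AMD_CG_STATE_UNGATE to the powergating call in places, which compiles because the two enums share values; the sketch uses the matching constants):

```c
/* Sequencing distilled from smu7_powergate_uvd() above: PG before
 * CG on the way down, CG before PG on the way back up. */
static void uvd_gate_sequence(struct pp_hwmgr *hwmgr, bool gate)
{
	if (gate) {
		cgs_set_powergating_state(hwmgr->device,
					  AMD_IP_BLOCK_TYPE_UVD,
					  AMD_PG_STATE_GATE);
		cgs_set_clockgating_state(hwmgr->device,
					  AMD_IP_BLOCK_TYPE_UVD,
					  AMD_CG_STATE_GATE);
	} else {
		cgs_set_clockgating_state(hwmgr->device,
					  AMD_IP_BLOCK_TYPE_UVD,
					  AMD_CG_STATE_UNGATE);
		cgs_set_powergating_state(hwmgr->device,
					  AMD_IP_BLOCK_TYPE_UVD,
					  AMD_PG_STATE_UNGATE);
	}
}
```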
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 0a6c833720df..f75ee33ec5bb 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | |||
| @@ -1383,6 +1383,15 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) | |||
| 1383 | data->force_pcie_gen = PP_PCIEGenInvalid; | 1383 | data->force_pcie_gen = PP_PCIEGenInvalid; |
| 1384 | data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false; | 1384 | data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false; |
| 1385 | 1385 | ||
| 1386 | if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->smumgr->is_kicker) { | ||
| 1387 | uint8_t tmp1, tmp2; | ||
| 1388 | uint16_t tmp3 = 0; | ||
| 1389 | atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2, | ||
| 1390 | &tmp3); | ||
| 1391 | tmp3 = (tmp3 >> 5) & 0x3; | ||
| 1392 | data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3; | ||
| 1393 | } | ||
| 1394 | |||
| 1386 | data->fast_watermark_threshold = 100; | 1395 | data->fast_watermark_threshold = 100; |
| 1387 | if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, | 1396 | if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, |
| 1388 | VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) | 1397 | VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) |
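The new Polaris12/kicker branch reads the SVI2 load line and derives vddc_phase_shed_control from bits 6:5, swapping the two bits in the process: ((tmp3 << 1) | (tmp3 >> 1)) masked to two bits turns 01 into 10 and back, and leaves 00 and 11 alone. A standalone worked example of exactly that bit math; the function name is made up for the demo:

```c
#include <assert.h>
#include <stdint.h>

/* Extract bits 6:5 of the SVI2 load-line word, then swap the two
 * bits, as the hunk above does for vddc_phase_shed_control. */
static uint8_t phase_shed_from_load_line(uint16_t load_line)
{
	uint8_t tmp = (load_line >> 5) & 0x3;

	return (uint8_t)(((tmp << 1) | (tmp >> 1)) & 0x3);
}

int main(void)
{
	assert(phase_shed_from_load_line(0x0020) == 0x2); /* 01 -> 10 */
	assert(phase_shed_from_load_line(0x0040) == 0x1); /* 10 -> 01 */
	assert(phase_shed_from_load_line(0x0060) == 0x3); /* 11 -> 11 */
	assert(phase_shed_from_load_line(0x0000) == 0x0); /* 00 -> 00 */
	return 0;
}
```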
| @@ -2624,6 +2633,7 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr, | |||
| 2624 | smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask); | 2633 | smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask); |
| 2625 | smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask); | 2634 | smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask); |
| 2626 | smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask); | 2635 | smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask); |
| 2636 | |||
| 2627 | break; | 2637 | break; |
| 2628 | case AMD_DPM_FORCED_LEVEL_MANUAL: | 2638 | case AMD_DPM_FORCED_LEVEL_MANUAL: |
| 2629 | hwmgr->dpm_level = level; | 2639 | hwmgr->dpm_level = level; |
| @@ -2633,9 +2643,9 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr, | |||
| 2633 | break; | 2643 | break; |
| 2634 | } | 2644 | } |
| 2635 | 2645 | ||
| 2636 | if (level & (AMD_DPM_FORCED_LEVEL_PROFILE_PEAK | AMD_DPM_FORCED_LEVEL_HIGH)) | 2646 | if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) |
| 2637 | smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100); | 2647 | smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100); |
| 2638 | else | 2648 | else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) |
| 2639 | smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr); | 2649 | smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr); |
| 2640 | 2650 | ||
| 2641 | return 0; | 2651 | return 0; |
| @@ -4397,16 +4407,14 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks) | |||
| 4397 | if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL) | 4407 | if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL) |
| 4398 | return -EINVAL; | 4408 | return -EINVAL; |
| 4399 | dep_sclk_table = table_info->vdd_dep_on_sclk; | 4409 | dep_sclk_table = table_info->vdd_dep_on_sclk; |
| 4400 | for (i = 0; i < dep_sclk_table->count; i++) { | 4410 | for (i = 0; i < dep_sclk_table->count; i++) |
| 4401 | clocks->clock[i] = dep_sclk_table->entries[i].clk; | 4411 | clocks->clock[i] = dep_sclk_table->entries[i].clk; |
| 4402 | clocks->count++; | 4412 | clocks->count = dep_sclk_table->count; |
| 4403 | } | ||
| 4404 | } else if (hwmgr->pp_table_version == PP_TABLE_V0) { | 4413 | } else if (hwmgr->pp_table_version == PP_TABLE_V0) { |
| 4405 | sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk; | 4414 | sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk; |
| 4406 | for (i = 0; i < sclk_table->count; i++) { | 4415 | for (i = 0; i < sclk_table->count; i++) |
| 4407 | clocks->clock[i] = sclk_table->entries[i].clk; | 4416 | clocks->clock[i] = sclk_table->entries[i].clk; |
| 4408 | clocks->count++; | 4417 | clocks->count = sclk_table->count; |
| 4409 | } | ||
| 4410 | } | 4418 | } |
| 4411 | 4419 | ||
| 4412 | return 0; | 4420 | return 0; |
| @@ -4440,14 +4448,13 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks) | |||
| 4440 | clocks->clock[i] = dep_mclk_table->entries[i].clk; | 4448 | clocks->clock[i] = dep_mclk_table->entries[i].clk; |
| 4441 | clocks->latency[i] = smu7_get_mem_latency(hwmgr, | 4449 | clocks->latency[i] = smu7_get_mem_latency(hwmgr, |
| 4442 | dep_mclk_table->entries[i].clk); | 4450 | dep_mclk_table->entries[i].clk); |
| 4443 | clocks->count++; | ||
| 4444 | } | 4451 | } |
| 4452 | clocks->count = dep_mclk_table->count; | ||
| 4445 | } else if (hwmgr->pp_table_version == PP_TABLE_V0) { | 4453 | } else if (hwmgr->pp_table_version == PP_TABLE_V0) { |
| 4446 | mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk; | 4454 | mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk; |
| 4447 | for (i = 0; i < mclk_table->count; i++) { | 4455 | for (i = 0; i < mclk_table->count; i++) |
| 4448 | clocks->clock[i] = mclk_table->entries[i].clk; | 4456 | clocks->clock[i] = mclk_table->entries[i].clk; |
| 4449 | clocks->count++; | 4457 | clocks->count = mclk_table->count; |
| 4450 | } | ||
| 4451 | } | 4458 | } |
| 4452 | return 0; | 4459 | return 0; |
| 4453 | } | 4460 | } |
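The old loops bumped clocks->count once per entry, which is only correct if the caller had zeroed it first; the rewritten versions fill the array and then assign the count from the source table in one step. The idiom, as a self-contained sketch with made-up types:

```c
#include <stddef.h>

struct clk_entry { unsigned int clk; };
struct clk_list  { unsigned int clock[16]; unsigned int count; };

/* Fill-then-assign, as in the fixed smu7_get_sclks()/smu7_get_mclks():
 * the count comes from the source table rather than from incrementing
 * a possibly non-zero clocks->count. */
static void copy_clocks(struct clk_list *out,
			const struct clk_entry *tbl, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		out->clock[i] = tbl[i].clk;
	out->count = (unsigned int)n;
}
```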
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h index 27e7f76ad8a6..f221e17b67e7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h | |||
| @@ -268,7 +268,7 @@ struct smu7_hwmgr { | |||
| 268 | uint32_t fast_watermark_threshold; | 268 | uint32_t fast_watermark_threshold; |
| 269 | 269 | ||
| 270 | /* ---- Phase Shedding ---- */ | 270 | /* ---- Phase Shedding ---- */ |
| 271 | bool vddc_phase_shed_control; | 271 | uint8_t vddc_phase_shed_control; |
| 272 | 272 | ||
| 273 | /* ---- DI/DT ---- */ | 273 | /* ---- DI/DT ---- */ |
| 274 | struct smu7_display_timing display_timing; | 274 | struct smu7_display_timing display_timing; |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index 3341c0fbd069..1dc31aa72781 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | |||
| @@ -477,6 +477,151 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris12[] = { | |||
| 477 | { 0xFFFFFFFF } | 477 | { 0xFFFFFFFF } |
| 478 | }; | 478 | }; |
| 479 | 479 | ||
| 480 | static const struct gpu_pt_config_reg DIDTConfig_Polaris11_Kicker[] = | ||
| 481 | { | ||
| 482 | /* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ||
| 483 | * Offset Mask Shift Value Type | ||
| 484 | * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ||
| 485 | */ | ||
| 486 | /* DIDT_SQ */ | ||
| 487 | { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x004c, GPU_CONFIGREG_DIDT_IND }, | ||
| 488 | { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00d0, GPU_CONFIGREG_DIDT_IND }, | ||
| 489 | { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0069, GPU_CONFIGREG_DIDT_IND }, | ||
| 490 | { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x0048, GPU_CONFIGREG_DIDT_IND }, | ||
| 491 | |||
| 492 | { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x005f, GPU_CONFIGREG_DIDT_IND }, | ||
| 493 | { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x007a, GPU_CONFIGREG_DIDT_IND }, | ||
| 494 | { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x001f, GPU_CONFIGREG_DIDT_IND }, | ||
| 495 | { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x002d, GPU_CONFIGREG_DIDT_IND }, | ||
| 496 | |||
| 497 | { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x0088, GPU_CONFIGREG_DIDT_IND }, | ||
| 498 | { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 499 | { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 500 | { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 501 | |||
| 502 | { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 503 | { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, | ||
| 504 | |||
| 505 | { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 506 | { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, | ||
| 507 | |||
| 508 | { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND }, | ||
| 509 | { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 510 | { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND }, | ||
| 511 | { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 512 | { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 513 | { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 514 | |||
| 515 | { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, | ||
| 516 | { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, | ||
| 517 | { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, | ||
| 518 | { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND }, | ||
| 519 | { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 520 | |||
| 521 | { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 522 | { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, | ||
| 523 | { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, | ||
| 524 | { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 525 | |||
| 526 | { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, | ||
| 527 | { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 528 | { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 529 | { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 530 | { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 531 | { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND }, | ||
| 532 | { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND }, | ||
| 533 | { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 534 | |||
| 535 | /* DIDT_TD */ | ||
| 536 | { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND }, | ||
| 537 | { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, | ||
| 538 | { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND }, | ||
| 539 | { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND }, | ||
| 540 | |||
| 541 | { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND }, | ||
| 542 | { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND }, | ||
| 543 | { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 544 | { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 545 | |||
| 546 | { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 547 | { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, | ||
| 548 | |||
| 549 | { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 550 | { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, | ||
| 551 | |||
| 552 | { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND }, | ||
| 553 | { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 554 | { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND }, | ||
| 555 | { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 556 | { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 557 | { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 558 | |||
| 559 | { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, | ||
| 560 | { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, | ||
| 561 | { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, | ||
| 562 | { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND }, | ||
| 563 | { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 564 | |||
| 565 | { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 566 | { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, | ||
| 567 | { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, | ||
| 568 | { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 569 | |||
| 570 | { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, | ||
| 571 | { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 572 | { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 573 | { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 574 | { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 575 | { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND }, | ||
| 576 | { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND }, | ||
| 577 | { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 578 | |||
| 579 | /* DIDT_TCP */ | ||
| 580 | { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, GPU_CONFIGREG_DIDT_IND }, | ||
| 581 | { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, GPU_CONFIGREG_DIDT_IND }, | ||
| 582 | { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, | ||
| 583 | { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, | ||
| 584 | |||
| 585 | { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, GPU_CONFIGREG_DIDT_IND }, | ||
| 586 | { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 587 | { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 588 | { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 589 | |||
| 590 | { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 591 | { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, | ||
| 592 | |||
| 593 | { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 594 | { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, | ||
| 595 | |||
| 596 | { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, | ||
| 597 | { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 598 | { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, GPU_CONFIGREG_DIDT_IND }, | ||
| 599 | { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 600 | { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 601 | { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 602 | |||
| 603 | { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, | ||
| 604 | { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, | ||
| 605 | { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, | ||
| 606 | { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT,0x01aa, GPU_CONFIGREG_DIDT_IND }, | ||
| 607 | { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 608 | |||
| 609 | { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, | ||
| 610 | { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, | ||
| 611 | { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, | ||
| 612 | { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 613 | |||
| 614 | { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, | ||
| 615 | { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 616 | { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 617 | { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 618 | { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 619 | { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, | ||
| 620 | { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, | ||
| 621 | { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, | ||
| 622 | |||
| 623 | { 0xFFFFFFFF } /* End of list */ | ||
| 624 | }; | ||
| 480 | 625 | ||
| 481 | static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable) | 626 | static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable) |
| 482 | { | 627 | { |
| @@ -630,7 +775,10 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) | |||
| 630 | } else if (hwmgr->chip_id == CHIP_POLARIS11) { | 775 | } else if (hwmgr->chip_id == CHIP_POLARIS11) { |
| 631 | result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11); | 776 | result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11); |
| 632 | PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); | 777 | PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); |
| 633 | result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11); | 778 | if (hwmgr->smumgr->is_kicker) |
| 779 | result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11_Kicker); | ||
| 780 | else | ||
| 781 | result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11); | ||
| 634 | PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); | 782 | PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); |
| 635 | } else if (hwmgr->chip_id == CHIP_POLARIS12) { | 783 | } else if (hwmgr->chip_id == CHIP_POLARIS12) { |
| 636 | result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11); | 784 | result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11); |
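smu7_enable_didt_config() now picks a register list per silicon flavor: kicker Polaris11 parts take the DIDTConfig_Polaris11_Kicker table defined above, everything else keeps the stock table. A small sketch of the selection, wrapped in a hypothetical helper:

```c
/* Hypothetical wrapper around the selection logic shown above:
 * kicker silicon gets the dedicated DIDT register list. */
static int program_polaris11_didt(struct pp_hwmgr *hwmgr)
{
	const struct gpu_pt_config_reg *table =
		hwmgr->smumgr->is_kicker ? DIDTConfig_Polaris11_Kicker
					 : DIDTConfig_Polaris11;

	return smu7_program_pt_config_registers(hwmgr, table);
}
```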
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index 9b6531bd6350..7c318a95e0c2 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h | |||
| @@ -137,6 +137,7 @@ struct pp_smumgr { | |||
| 137 | uint32_t usec_timeout; | 137 | uint32_t usec_timeout; |
| 138 | bool reload_fw; | 138 | bool reload_fw; |
| 139 | const struct pp_smumgr_func *smumgr_funcs; | 139 | const struct pp_smumgr_func *smumgr_funcs; |
| 140 | bool is_kicker; | ||
| 140 | }; | 141 | }; |
| 141 | 142 | ||
| 142 | extern int smum_early_init(struct pp_instance *handle); | 143 | extern int smum_early_init(struct pp_instance *handle); |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c index 0e26900e459e..80e2329a1b9e 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c | |||
| @@ -494,6 +494,7 @@ static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr, | |||
| 494 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 494 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 495 | struct phm_ppt_v1_information *table_info = | 495 | struct phm_ppt_v1_information *table_info = |
| 496 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 496 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 497 | struct pp_smumgr *smumgr = hwmgr->smumgr; | ||
| 497 | 498 | ||
| 498 | state->CcPwrDynRm = 0; | 499 | state->CcPwrDynRm = 0; |
| 499 | state->CcPwrDynRm1 = 0; | 500 | state->CcPwrDynRm1 = 0; |
| @@ -502,7 +503,10 @@ static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr, | |||
| 502 | state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset * | 503 | state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset * |
| 503 | VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); | 504 | VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); |
| 504 | 505 | ||
| 505 | state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1; | 506 | if (smumgr->chip_id == CHIP_POLARIS12 || smumgr->is_kicker) |
| 507 | state->VddcPhase = data->vddc_phase_shed_control ^ 0x3; | ||
| 508 | else | ||
| 509 | state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1; | ||
| 506 | 510 | ||
| 507 | CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm); | 511 | CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm); |
| 508 | CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1); | 512 | CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1); |
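On Polaris12 and kicker parts the ULV VddcPhase becomes the two-bit complement of vddc_phase_shed_control (00 becomes 11, 01 becomes 10), while older parts still collapse it to a 0/1 flag. A hedged standalone restatement, with an invented helper name:

```c
#include <stdbool.h>
#include <stdint.h>

/* Mirrors the hunk above: XOR with 0x3 flips both bits of the
 * two-bit field; the legacy path is a plain boolean inversion. */
static uint8_t ulv_vddc_phase(uint8_t phase_shed_control, bool p12_or_kicker)
{
	if (p12_or_kicker)
		return phase_shed_control ^ 0x3; /* 00->11, 01->10, 10->01, 11->00 */

	return phase_shed_control ? 0 : 1;
}
```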
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index 6749fbe26c74..35ac27681415 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | |||
| @@ -533,6 +533,8 @@ int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr) | |||
| 533 | cgs_get_firmware_info(smumgr->device, | 533 | cgs_get_firmware_info(smumgr->device, |
| 534 | smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info); | 534 | smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info); |
| 535 | 535 | ||
| 536 | smumgr->is_kicker = info.is_kicker; | ||
| 537 | |||
| 536 | result = smu7_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE); | 538 | result = smu7_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE); |
| 537 | 539 | ||
| 538 | return result; | 540 | return result; |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index 60c36928284c..c0956a4207a9 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | |||
| @@ -37,8 +37,10 @@ MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin"); | |||
| 37 | MODULE_FIRMWARE("amdgpu/fiji_smc.bin"); | 37 | MODULE_FIRMWARE("amdgpu/fiji_smc.bin"); |
| 38 | MODULE_FIRMWARE("amdgpu/polaris10_smc.bin"); | 38 | MODULE_FIRMWARE("amdgpu/polaris10_smc.bin"); |
| 39 | MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin"); | 39 | MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin"); |
| 40 | MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin"); | ||
| 40 | MODULE_FIRMWARE("amdgpu/polaris11_smc.bin"); | 41 | MODULE_FIRMWARE("amdgpu/polaris11_smc.bin"); |
| 41 | MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin"); | 42 | MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin"); |
| 43 | MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin"); | ||
| 42 | MODULE_FIRMWARE("amdgpu/polaris12_smc.bin"); | 44 | MODULE_FIRMWARE("amdgpu/polaris12_smc.bin"); |
| 43 | 45 | ||
| 44 | 46 | ||
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 1051181d8c0d..5a8fa1c85229 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h | |||
| @@ -114,6 +114,7 @@ struct ast_private { | |||
| 114 | struct ttm_bo_kmap_obj cache_kmap; | 114 | struct ttm_bo_kmap_obj cache_kmap; |
| 115 | int next_cursor; | 115 | int next_cursor; |
| 116 | bool support_wide_screen; | 116 | bool support_wide_screen; |
| 117 | bool DisableP2A; | ||
| 117 | 118 | ||
| 118 | enum ast_tx_chip tx_chip_type; | 119 | enum ast_tx_chip tx_chip_type; |
| 119 | u8 dp501_maxclk; | 120 | u8 dp501_maxclk; |
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 5992ed2166ec..993909430736 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c | |||
| @@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) | |||
| 124 | } else | 124 | } else |
| 125 | *need_post = false; | 125 | *need_post = false; |
| 126 | 126 | ||
| 127 | /* Check P2A Access */ | ||
| 128 | ast->DisableP2A = true; | ||
| 129 | data = ast_read32(ast, 0xf004); | ||
| 130 | if (data != 0xFFFFFFFF) | ||
| 131 | ast->DisableP2A = false; | ||
| 132 | |||
| 127 | /* Check if we support wide screen */ | 133 | /* Check if we support wide screen */ |
| 128 | switch (ast->chip) { | 134 | switch (ast->chip) { |
| 129 | case AST1180: | 135 | case AST1180: |
| @@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) | |||
| 140 | ast->support_wide_screen = true; | 146 | ast->support_wide_screen = true; |
| 141 | else { | 147 | else { |
| 142 | ast->support_wide_screen = false; | 148 | ast->support_wide_screen = false; |
| 143 | /* Read SCU7c (silicon revision register) */ | 149 | if (ast->DisableP2A == false) { |
| 144 | ast_write32(ast, 0xf004, 0x1e6e0000); | 150 | /* Read SCU7c (silicon revision register) */ |
| 145 | ast_write32(ast, 0xf000, 0x1); | 151 | ast_write32(ast, 0xf004, 0x1e6e0000); |
| 146 | data = ast_read32(ast, 0x1207c); | 152 | ast_write32(ast, 0xf000, 0x1); |
| 147 | data &= 0x300; | 153 | data = ast_read32(ast, 0x1207c); |
| 148 | if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ | 154 | data &= 0x300; |
| 149 | ast->support_wide_screen = true; | 155 | if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ |
| 150 | if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ | 156 | ast->support_wide_screen = true; |
| 151 | ast->support_wide_screen = true; | 157 | if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ |
| 158 | ast->support_wide_screen = true; | ||
| 159 | } | ||
| 152 | } | 160 | } |
| 153 | break; | 161 | break; |
| 154 | } | 162 | } |
| @@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_device *dev) | |||
| 216 | uint32_t data, data2; | 224 | uint32_t data, data2; |
| 217 | uint32_t denum, num, div, ref_pll; | 225 | uint32_t denum, num, div, ref_pll; |
| 218 | 226 | ||
| 219 | ast_write32(ast, 0xf004, 0x1e6e0000); | 227 | if (ast->DisableP2A) |
| 220 | ast_write32(ast, 0xf000, 0x1); | 228 | { |
| 221 | |||
| 222 | |||
| 223 | ast_write32(ast, 0x10000, 0xfc600309); | ||
| 224 | |||
| 225 | do { | ||
| 226 | if (pci_channel_offline(dev->pdev)) | ||
| 227 | return -EIO; | ||
| 228 | } while (ast_read32(ast, 0x10000) != 0x01); | ||
| 229 | data = ast_read32(ast, 0x10004); | ||
| 230 | |||
| 231 | if (data & 0x40) | ||
| 232 | ast->dram_bus_width = 16; | 229 | ast->dram_bus_width = 16; |
| 230 | ast->dram_type = AST_DRAM_1Gx16; | ||
| 231 | ast->mclk = 396; | ||
| 232 | } | ||
| 233 | else | 233 | else |
| 234 | ast->dram_bus_width = 32; | 234 | { |
| 235 | ast_write32(ast, 0xf004, 0x1e6e0000); | ||
| 236 | ast_write32(ast, 0xf000, 0x1); | ||
| 237 | data = ast_read32(ast, 0x10004); | ||
| 238 | |||
| 239 | if (data & 0x40) | ||
| 240 | ast->dram_bus_width = 16; | ||
| 241 | else | ||
| 242 | ast->dram_bus_width = 32; | ||
| 243 | |||
| 244 | if (ast->chip == AST2300 || ast->chip == AST2400) { | ||
| 245 | switch (data & 0x03) { | ||
| 246 | case 0: | ||
| 247 | ast->dram_type = AST_DRAM_512Mx16; | ||
| 248 | break; | ||
| 249 | default: | ||
| 250 | case 1: | ||
| 251 | ast->dram_type = AST_DRAM_1Gx16; | ||
| 252 | break; | ||
| 253 | case 2: | ||
| 254 | ast->dram_type = AST_DRAM_2Gx16; | ||
| 255 | break; | ||
| 256 | case 3: | ||
| 257 | ast->dram_type = AST_DRAM_4Gx16; | ||
| 258 | break; | ||
| 259 | } | ||
| 260 | } else { | ||
| 261 | switch (data & 0x0c) { | ||
| 262 | case 0: | ||
| 263 | case 4: | ||
| 264 | ast->dram_type = AST_DRAM_512Mx16; | ||
| 265 | break; | ||
| 266 | case 8: | ||
| 267 | if (data & 0x40) | ||
| 268 | ast->dram_type = AST_DRAM_1Gx16; | ||
| 269 | else | ||
| 270 | ast->dram_type = AST_DRAM_512Mx32; | ||
| 271 | break; | ||
| 272 | case 0xc: | ||
| 273 | ast->dram_type = AST_DRAM_1Gx32; | ||
| 274 | break; | ||
| 275 | } | ||
| 276 | } | ||
| 235 | 277 | ||
| 236 | if (ast->chip == AST2300 || ast->chip == AST2400) { | 278 | data = ast_read32(ast, 0x10120); |
| 237 | switch (data & 0x03) { | 279 | data2 = ast_read32(ast, 0x10170); |
| 238 | case 0: | 280 | if (data2 & 0x2000) |
| 239 | ast->dram_type = AST_DRAM_512Mx16; | 281 | ref_pll = 14318; |
| 240 | break; | 282 | else |
| 241 | default: | 283 | ref_pll = 12000; |
| 242 | case 1: | 284 | |
| 243 | ast->dram_type = AST_DRAM_1Gx16; | 285 | denum = data & 0x1f; |
| 244 | break; | 286 | num = (data & 0x3fe0) >> 5; |
| 245 | case 2: | 287 | data = (data & 0xc000) >> 14; |
| 246 | ast->dram_type = AST_DRAM_2Gx16; | 288 | switch (data) { |
| 247 | break; | ||
| 248 | case 3: | 289 | case 3: |
| 249 | ast->dram_type = AST_DRAM_4Gx16; | 290 | div = 0x4; |
| 250 | break; | ||
| 251 | } | ||
| 252 | } else { | ||
| 253 | switch (data & 0x0c) { | ||
| 254 | case 0: | ||
| 255 | case 4: | ||
| 256 | ast->dram_type = AST_DRAM_512Mx16; | ||
| 257 | break; | 291 | break; |
| 258 | case 8: | 292 | case 2: |
| 259 | if (data & 0x40) | 293 | case 1: |
| 260 | ast->dram_type = AST_DRAM_1Gx16; | 294 | div = 0x2; |
| 261 | else | ||
| 262 | ast->dram_type = AST_DRAM_512Mx32; | ||
| 263 | break; | 295 | break; |
| 264 | case 0xc: | 296 | default: |
| 265 | ast->dram_type = AST_DRAM_1Gx32; | 297 | div = 0x1; |
| 266 | break; | 298 | break; |
| 267 | } | 299 | } |
| 300 | ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); | ||
| 268 | } | 301 | } |
| 269 | |||
| 270 | data = ast_read32(ast, 0x10120); | ||
| 271 | data2 = ast_read32(ast, 0x10170); | ||
| 272 | if (data2 & 0x2000) | ||
| 273 | ref_pll = 14318; | ||
| 274 | else | ||
| 275 | ref_pll = 12000; | ||
| 276 | |||
| 277 | denum = data & 0x1f; | ||
| 278 | num = (data & 0x3fe0) >> 5; | ||
| 279 | data = (data & 0xc000) >> 14; | ||
| 280 | switch (data) { | ||
| 281 | case 3: | ||
| 282 | div = 0x4; | ||
| 283 | break; | ||
| 284 | case 2: | ||
| 285 | case 1: | ||
| 286 | div = 0x2; | ||
| 287 | break; | ||
| 288 | default: | ||
| 289 | div = 0x1; | ||
| 290 | break; | ||
| 291 | } | ||
| 292 | ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); | ||
| 293 | return 0; | 302 | return 0; |
| 294 | } | 303 | } |
| 295 | 304 | ||
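The reworked ast_get_dram_info() above falls back to fixed defaults (16-bit bus, 1Gx16 DRAM, MCLK 396) when P2A access is disabled, and otherwise decodes the memory clock from the SCU PLL register. A minimal standalone sketch of that decode, mirroring the field layout in the hunk (denominator in bits 4:0, numerator in bits 13:5, post-divider code in bits 15:14); decode_mclk() and its parameters are illustrative names, not driver API:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch of the MCLK decode above. 'pll' stands in for the value
     * read at 0x10120 and 'pll_14318' for bit 13 of 0x10170, which
     * selects the 14.318 MHz reference over 12 MHz.
     */
    static unsigned int decode_mclk(uint32_t pll, bool pll_14318)
    {
            unsigned int ref_pll = pll_14318 ? 14318 : 12000;
            unsigned int denum = pll & 0x1f;          /* bits 4:0   */
            unsigned int num = (pll & 0x3fe0) >> 5;   /* bits 13:5  */
            unsigned int div;

            switch ((pll & 0xc000) >> 14) {           /* bits 15:14 */
            case 3:
                    div = 4;
                    break;
            case 2:
            case 1:
                    div = 2;
                    break;
            default:
                    div = 1;
                    break;
            }
            /* Same expression as the hunk above; C groups it left to
             * right as ((ref_pll * (num + 2)) / (denum + 2)) * (div * 1000).
             */
            return ref_pll * (num + 2) / (denum + 2) * (div * 1000);
    }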
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 810c51d92b99..5331ee1df086 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c | |||
| @@ -379,12 +379,20 @@ void ast_post_gpu(struct drm_device *dev) | |||
| 379 | ast_open_key(ast); | 379 | ast_open_key(ast); |
| 380 | ast_set_def_ext_reg(dev); | 380 | ast_set_def_ext_reg(dev); |
| 381 | 381 | ||
| 382 | if (ast->chip == AST2300 || ast->chip == AST2400) | 382 | if (ast->DisableP2A == false) |
| 383 | ast_init_dram_2300(dev); | 383 | { |
| 384 | else | 384 | if (ast->chip == AST2300 || ast->chip == AST2400) |
| 385 | ast_init_dram_reg(dev); | 385 | ast_init_dram_2300(dev); |
| 386 | else | ||
| 387 | ast_init_dram_reg(dev); | ||
| 386 | 388 | ||
| 387 | ast_init_3rdtx(dev); | 389 | ast_init_3rdtx(dev); |
| 390 | } | ||
| 391 | else | ||
| 392 | { | ||
| 393 | if (ast->tx_chip_type != AST_TX_NONE) | ||
| 394 | ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */ | ||
| 395 | } | ||
| 388 | } | 396 | } |
| 389 | 397 | ||
| 390 | /* AST 2300 DRAM settings */ | 398 | /* AST 2300 DRAM settings */ |
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 5a0c7082c8f8..afec53832145 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c | |||
| @@ -288,15 +288,15 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state, | |||
| 288 | EXPORT_SYMBOL(drm_atomic_get_crtc_state); | 288 | EXPORT_SYMBOL(drm_atomic_get_crtc_state); |
| 289 | 289 | ||
| 290 | static void set_out_fence_for_crtc(struct drm_atomic_state *state, | 290 | static void set_out_fence_for_crtc(struct drm_atomic_state *state, |
| 291 | struct drm_crtc *crtc, s64 __user *fence_ptr) | 291 | struct drm_crtc *crtc, s32 __user *fence_ptr) |
| 292 | { | 292 | { |
| 293 | state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr; | 293 | state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr; |
| 294 | } | 294 | } |
| 295 | 295 | ||
| 296 | static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state, | 296 | static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state, |
| 297 | struct drm_crtc *crtc) | 297 | struct drm_crtc *crtc) |
| 298 | { | 298 | { |
| 299 | s64 __user *fence_ptr; | 299 | s32 __user *fence_ptr; |
| 300 | 300 | ||
| 301 | fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr; | 301 | fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr; |
| 302 | state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL; | 302 | state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL; |
| @@ -507,7 +507,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc, | |||
| 507 | state->color_mgmt_changed |= replaced; | 507 | state->color_mgmt_changed |= replaced; |
| 508 | return ret; | 508 | return ret; |
| 509 | } else if (property == config->prop_out_fence_ptr) { | 509 | } else if (property == config->prop_out_fence_ptr) { |
| 510 | s64 __user *fence_ptr = u64_to_user_ptr(val); | 510 | s32 __user *fence_ptr = u64_to_user_ptr(val); |
| 511 | 511 | ||
| 512 | if (!fence_ptr) | 512 | if (!fence_ptr) |
| 513 | return 0; | 513 | return 0; |
| @@ -1914,7 +1914,7 @@ EXPORT_SYMBOL(drm_atomic_clean_old_fb); | |||
| 1914 | */ | 1914 | */ |
| 1915 | 1915 | ||
| 1916 | struct drm_out_fence_state { | 1916 | struct drm_out_fence_state { |
| 1917 | s64 __user *out_fence_ptr; | 1917 | s32 __user *out_fence_ptr; |
| 1918 | struct sync_file *sync_file; | 1918 | struct sync_file *sync_file; |
| 1919 | int fd; | 1919 | int fd; |
| 1920 | }; | 1920 | }; |
| @@ -1951,7 +1951,7 @@ static int prepare_crtc_signaling(struct drm_device *dev, | |||
| 1951 | return 0; | 1951 | return 0; |
| 1952 | 1952 | ||
| 1953 | for_each_new_crtc_in_state(state, crtc, crtc_state, i) { | 1953 | for_each_new_crtc_in_state(state, crtc, crtc_state, i) { |
| 1954 | u64 __user *fence_ptr; | 1954 | s32 __user *fence_ptr; |
| 1955 | 1955 | ||
| 1956 | fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc); | 1956 | fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc); |
| 1957 | 1957 | ||
| @@ -2031,13 +2031,16 @@ static void complete_crtc_signaling(struct drm_device *dev, | |||
| 2031 | } | 2031 | } |
| 2032 | 2032 | ||
| 2033 | for_each_new_crtc_in_state(state, crtc, crtc_state, i) { | 2033 | for_each_new_crtc_in_state(state, crtc, crtc_state, i) { |
| 2034 | struct drm_pending_vblank_event *event = crtc_state->event; | ||
| 2034 | /* | 2035 | /* |
| 2035 | * TEST_ONLY and PAGE_FLIP_EVENT are mutually | 2036 | * Free the allocated event. drm_atomic_helper_setup_commit |
| 2036 | * exclusive, if they weren't, this code should be | 2037 | * can allocate an event too, so only free it if it's ours |
| 2037 | * called on success for TEST_ONLY too. | 2038 | * to prevent a double free in drm_atomic_state_clear. |
| 2038 | */ | 2039 | */ |
| 2039 | if (crtc_state->event) | 2040 | if (event && (event->base.fence || event->base.file_priv)) { |
| 2040 | drm_event_cancel_free(dev, &crtc_state->event->base); | 2041 | drm_event_cancel_free(dev, &event->base); |
| 2042 | crtc_state->event = NULL; | ||
| 2043 | } | ||
| 2041 | } | 2044 | } |
| 2042 | 2045 | ||
| 2043 | if (!fence_state) | 2046 | if (!fence_state) |
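The out-fence pointer type change above is a userspace-ABI fix: the OUT_FENCE_PTR property carries a pointer to the int that receives the sync_file fd, and put_user() sizes its store from the pointer type, so an s64 __user * made the kernel write 8 bytes into a 4-byte slot. A kernel-side sketch of the store with the corrected type (sketch_write_out_fence is an illustrative name, not a DRM function):

    #include <linux/types.h>
    #include <linux/uaccess.h>

    /* Return the new fence fd through the user-supplied pointer.
     * With s32 __user *, put_user() copies exactly 4 bytes; the old
     * s64 __user * clobbered the 4 bytes following userspace's int
     * out_fence variable. Returns 0 or -EFAULT.
     */
    static int sketch_write_out_fence(s32 __user *fence_ptr, int fence_fd)
    {
            return put_user(fence_fd, fence_ptr);
    }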
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 5d9830f6a190..9203f3e933f7 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c | |||
| @@ -370,7 +370,7 @@ mode_fixup(struct drm_atomic_state *state) | |||
| 370 | struct drm_connector *connector; | 370 | struct drm_connector *connector; |
| 371 | struct drm_connector_state *conn_state; | 371 | struct drm_connector_state *conn_state; |
| 372 | int i; | 372 | int i; |
| 373 | bool ret; | 373 | int ret; |
| 374 | 374 | ||
| 375 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | 375 | for_each_crtc_in_state(state, crtc, crtc_state, i) { |
| 376 | if (!crtc_state->mode_changed && | 376 | if (!crtc_state->mode_changed && |
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index e4d2c8a49076..45464c8b797d 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c | |||
| @@ -378,6 +378,9 @@ int drm_connector_register(struct drm_connector *connector) | |||
| 378 | { | 378 | { |
| 379 | int ret = 0; | 379 | int ret = 0; |
| 380 | 380 | ||
| 381 | if (!connector->dev->registered) | ||
| 382 | return 0; | ||
| 383 | |||
| 381 | mutex_lock(&connector->mutex); | 384 | mutex_lock(&connector->mutex); |
| 382 | if (connector->registered) | 385 | if (connector->registered) |
| 383 | goto unlock; | 386 | goto unlock; |
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 45ce224688ce..b5c6bb46a425 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c | |||
| @@ -776,6 +776,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) | |||
| 776 | if (ret) | 776 | if (ret) |
| 777 | goto err_minors; | 777 | goto err_minors; |
| 778 | 778 | ||
| 779 | dev->registered = true; | ||
| 780 | |||
| 779 | if (dev->driver->load) { | 781 | if (dev->driver->load) { |
| 780 | ret = dev->driver->load(dev, flags); | 782 | ret = dev->driver->load(dev, flags); |
| 781 | if (ret) | 783 | if (ret) |
| @@ -823,6 +825,8 @@ void drm_dev_unregister(struct drm_device *dev) | |||
| 823 | 825 | ||
| 824 | drm_lastclose(dev); | 826 | drm_lastclose(dev); |
| 825 | 827 | ||
| 828 | dev->registered = false; | ||
| 829 | |||
| 826 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 830 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
| 827 | drm_modeset_unregister_all(dev); | 831 | drm_modeset_unregister_all(dev); |
| 828 | 832 | ||
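The new drm_device::registered flag, set at the end of drm_dev_register() and cleared early in drm_dev_unregister(), lets drm_connector_register() return success without doing anything for connectors created before the device is published; the device-wide registration pass picks them up later. A condensed sketch of the guard pattern, with hypothetical names abbreviated from the hunks above:

    #include <stdbool.h>

    /* Per-object registration defers until the parent device is
     * registered; a bulk pass (drm_modeset_register_all() in the real
     * core) then publishes every object at once.
     */
    struct sketch_dev {
            bool registered;
    };

    static int sketch_connector_register(struct sketch_dev *dev)
    {
            if (!dev->registered)
                    return 0;  /* too early: bulk registration follows */
            /* ... sysfs/debugfs registration would happen here ... */
            return 0;
    }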
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index c91240598471..0dd5da8c55e5 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
| @@ -858,6 +858,9 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) | |||
| 858 | } | 858 | } |
| 859 | fb_helper->fbdev = NULL; | 859 | fb_helper->fbdev = NULL; |
| 860 | 860 | ||
| 861 | cancel_work_sync(&fb_helper->resume_work); | ||
| 862 | cancel_work_sync(&fb_helper->dirty_work); | ||
| 863 | |||
| 861 | mutex_lock(&kernel_fb_helper_lock); | 864 | mutex_lock(&kernel_fb_helper_lock); |
| 862 | if (!list_empty(&fb_helper->kernel_fb_list)) { | 865 | if (!list_empty(&fb_helper->kernel_fb_list)) { |
| 863 | list_del(&fb_helper->kernel_fb_list); | 866 | list_del(&fb_helper->kernel_fb_list); |
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c index 3dfe3c886502..308d442a531b 100644 --- a/drivers/gpu/drm/drm_panel.c +++ b/drivers/gpu/drm/drm_panel.c | |||
| @@ -137,7 +137,7 @@ EXPORT_SYMBOL(drm_panel_detach); | |||
| 137 | * Return: A pointer to the panel registered for the specified device tree | 137 | * Return: A pointer to the panel registered for the specified device tree |
| 138 | * node or NULL if no panel matching the device tree node can be found. | 138 | * node or NULL if no panel matching the device tree node can be found. |
| 139 | */ | 139 | */ |
| 140 | struct drm_panel *of_drm_find_panel(struct device_node *np) | 140 | struct drm_panel *of_drm_find_panel(const struct device_node *np) |
| 141 | { | 141 | { |
| 142 | struct drm_panel *panel; | 142 | struct drm_panel *panel; |
| 143 | 143 | ||
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index d69af00bdd6a..0fd6f7a18364 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c | |||
| @@ -13,9 +13,11 @@ | |||
| 13 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
| 14 | #include <linux/clk.h> | 14 | #include <linux/clk.h> |
| 15 | #include <linux/component.h> | 15 | #include <linux/component.h> |
| 16 | #include <linux/mfd/syscon.h> | ||
| 16 | #include <linux/of_device.h> | 17 | #include <linux/of_device.h> |
| 17 | #include <linux/of_gpio.h> | 18 | #include <linux/of_gpio.h> |
| 18 | #include <linux/pm_runtime.h> | 19 | #include <linux/pm_runtime.h> |
| 20 | #include <linux/regmap.h> | ||
| 19 | 21 | ||
| 20 | #include <video/exynos5433_decon.h> | 22 | #include <video/exynos5433_decon.h> |
| 21 | 23 | ||
| @@ -25,6 +27,9 @@ | |||
| 25 | #include "exynos_drm_plane.h" | 27 | #include "exynos_drm_plane.h" |
| 26 | #include "exynos_drm_iommu.h" | 28 | #include "exynos_drm_iommu.h" |
| 27 | 29 | ||
| 30 | #define DSD_CFG_MUX 0x1004 | ||
| 31 | #define DSD_CFG_MUX_TE_UNMASK_GLOBAL BIT(13) | ||
| 32 | |||
| 28 | #define WINDOWS_NR 3 | 33 | #define WINDOWS_NR 3 |
| 29 | #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 | 34 | #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 |
| 30 | 35 | ||
| @@ -57,6 +62,7 @@ struct decon_context { | |||
| 57 | struct exynos_drm_plane planes[WINDOWS_NR]; | 62 | struct exynos_drm_plane planes[WINDOWS_NR]; |
| 58 | struct exynos_drm_plane_config configs[WINDOWS_NR]; | 63 | struct exynos_drm_plane_config configs[WINDOWS_NR]; |
| 59 | void __iomem *addr; | 64 | void __iomem *addr; |
| 65 | struct regmap *sysreg; | ||
| 60 | struct clk *clks[ARRAY_SIZE(decon_clks_name)]; | 66 | struct clk *clks[ARRAY_SIZE(decon_clks_name)]; |
| 61 | int pipe; | 67 | int pipe; |
| 62 | unsigned long flags; | 68 | unsigned long flags; |
| @@ -118,18 +124,29 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc) | |||
| 118 | 124 | ||
| 119 | static void decon_setup_trigger(struct decon_context *ctx) | 125 | static void decon_setup_trigger(struct decon_context *ctx) |
| 120 | { | 126 | { |
| 121 | u32 val = !(ctx->out_type & I80_HW_TRG) | 127 | if (!(ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))) |
| 122 | ? TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F | | 128 | return; |
| 123 | TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN | 129 | |
| 124 | : TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F | | 130 | if (!(ctx->out_type & I80_HW_TRG)) { |
| 125 | TRIGCON_HWTRIGMASK | TRIGCON_HWTRIGEN; | 131 | writel(TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F
| 126 | writel(val, ctx->addr + DECON_TRIGCON); | 132 | | TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN, |
| 133 | ctx->addr + DECON_TRIGCON); | ||
| 134 | return; | ||
| 135 | } | ||
| 136 | |||
| 137 | writel(TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F | TRIGCON_HWTRIGMASK | ||
| 138 | | TRIGCON_HWTRIGEN, ctx->addr + DECON_TRIGCON); | ||
| 139 | |||
| 140 | if (regmap_update_bits(ctx->sysreg, DSD_CFG_MUX, | ||
| 141 | DSD_CFG_MUX_TE_UNMASK_GLOBAL, ~0)) | ||
| 142 | DRM_ERROR("Cannot update sysreg.\n"); | ||
| 127 | } | 143 | } |
| 128 | 144 | ||
| 129 | static void decon_commit(struct exynos_drm_crtc *crtc) | 145 | static void decon_commit(struct exynos_drm_crtc *crtc) |
| 130 | { | 146 | { |
| 131 | struct decon_context *ctx = crtc->ctx; | 147 | struct decon_context *ctx = crtc->ctx; |
| 132 | struct drm_display_mode *m = &crtc->base.mode; | 148 | struct drm_display_mode *m = &crtc->base.mode; |
| 149 | bool interlaced = false; | ||
| 133 | u32 val; | 150 | u32 val; |
| 134 | 151 | ||
| 135 | if (test_bit(BIT_SUSPENDED, &ctx->flags)) | 152 | if (test_bit(BIT_SUSPENDED, &ctx->flags)) |
| @@ -140,13 +157,16 @@ static void decon_commit(struct exynos_drm_crtc *crtc) | |||
| 140 | m->crtc_hsync_end = m->crtc_htotal - 92; | 157 | m->crtc_hsync_end = m->crtc_htotal - 92; |
| 141 | m->crtc_vsync_start = m->crtc_vdisplay + 1; | 158 | m->crtc_vsync_start = m->crtc_vdisplay + 1; |
| 142 | m->crtc_vsync_end = m->crtc_vsync_start + 1; | 159 | m->crtc_vsync_end = m->crtc_vsync_start + 1; |
| 160 | if (m->flags & DRM_MODE_FLAG_INTERLACE) | ||
| 161 | interlaced = true; | ||
| 143 | } | 162 | } |
| 144 | 163 | ||
| 145 | if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)) | 164 | decon_setup_trigger(ctx); |
| 146 | decon_setup_trigger(ctx); | ||
| 147 | 165 | ||
| 148 | /* lcd on and use command if */ | 166 | /* lcd on and use command if */ |
| 149 | val = VIDOUT_LCD_ON; | 167 | val = VIDOUT_LCD_ON; |
| 168 | if (interlaced) | ||
| 169 | val |= VIDOUT_INTERLACE_EN_F; | ||
| 150 | if (ctx->out_type & IFTYPE_I80) { | 170 | if (ctx->out_type & IFTYPE_I80) { |
| 151 | val |= VIDOUT_COMMAND_IF; | 171 | val |= VIDOUT_COMMAND_IF; |
| 152 | } else { | 172 | } else { |
| @@ -155,15 +175,21 @@ static void decon_commit(struct exynos_drm_crtc *crtc) | |||
| 155 | 175 | ||
| 156 | writel(val, ctx->addr + DECON_VIDOUTCON0); | 176 | writel(val, ctx->addr + DECON_VIDOUTCON0); |
| 157 | 177 | ||
| 158 | val = VIDTCON2_LINEVAL(m->vdisplay - 1) | | 178 | if (interlaced) |
| 159 | VIDTCON2_HOZVAL(m->hdisplay - 1); | 179 | val = VIDTCON2_LINEVAL(m->vdisplay / 2 - 1) | |
| 180 | VIDTCON2_HOZVAL(m->hdisplay - 1); | ||
| 181 | else | ||
| 182 | val = VIDTCON2_LINEVAL(m->vdisplay - 1) | | ||
| 183 | VIDTCON2_HOZVAL(m->hdisplay - 1); | ||
| 160 | writel(val, ctx->addr + DECON_VIDTCON2); | 184 | writel(val, ctx->addr + DECON_VIDTCON2); |
| 161 | 185 | ||
| 162 | if (!(ctx->out_type & IFTYPE_I80)) { | 186 | if (!(ctx->out_type & IFTYPE_I80)) { |
| 163 | val = VIDTCON00_VBPD_F( | 187 | int vbp = m->crtc_vtotal - m->crtc_vsync_end; |
| 164 | m->crtc_vtotal - m->crtc_vsync_end - 1) | | 188 | int vfp = m->crtc_vsync_start - m->crtc_vdisplay; |
| 165 | VIDTCON00_VFPD_F( | 189 | |
| 166 | m->crtc_vsync_start - m->crtc_vdisplay - 1); | 190 | if (interlaced) |
| 191 | vbp = vbp / 2 - 1; | ||
| 192 | val = VIDTCON00_VBPD_F(vbp - 1) | VIDTCON00_VFPD_F(vfp - 1); | ||
| 167 | writel(val, ctx->addr + DECON_VIDTCON00); | 193 | writel(val, ctx->addr + DECON_VIDTCON00); |
| 168 | 194 | ||
| 169 | val = VIDTCON01_VSPW_F( | 195 | val = VIDTCON01_VSPW_F( |
| @@ -278,12 +304,22 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, | |||
| 278 | if (test_bit(BIT_SUSPENDED, &ctx->flags)) | 304 | if (test_bit(BIT_SUSPENDED, &ctx->flags)) |
| 279 | return; | 305 | return; |
| 280 | 306 | ||
| 281 | val = COORDINATE_X(state->crtc.x) | COORDINATE_Y(state->crtc.y); | 307 | if (crtc->base.mode.flags & DRM_MODE_FLAG_INTERLACE) { |
| 282 | writel(val, ctx->addr + DECON_VIDOSDxA(win)); | 308 | val = COORDINATE_X(state->crtc.x) | |
| 309 | COORDINATE_Y(state->crtc.y / 2); | ||
| 310 | writel(val, ctx->addr + DECON_VIDOSDxA(win)); | ||
| 311 | |||
| 312 | val = COORDINATE_X(state->crtc.x + state->crtc.w - 1) | | ||
| 313 | COORDINATE_Y((state->crtc.y + state->crtc.h) / 2 - 1); | ||
| 314 | writel(val, ctx->addr + DECON_VIDOSDxB(win)); | ||
| 315 | } else { | ||
| 316 | val = COORDINATE_X(state->crtc.x) | COORDINATE_Y(state->crtc.y); | ||
| 317 | writel(val, ctx->addr + DECON_VIDOSDxA(win)); | ||
| 283 | 318 | ||
| 284 | val = COORDINATE_X(state->crtc.x + state->crtc.w - 1) | | 319 | val = COORDINATE_X(state->crtc.x + state->crtc.w - 1) | |
| 285 | COORDINATE_Y(state->crtc.y + state->crtc.h - 1); | 320 | COORDINATE_Y(state->crtc.y + state->crtc.h - 1); |
| 286 | writel(val, ctx->addr + DECON_VIDOSDxB(win)); | 321 | writel(val, ctx->addr + DECON_VIDOSDxB(win)); |
| 322 | } | ||
| 287 | 323 | ||
| 288 | val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) | | 324 | val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) | |
| 289 | VIDOSD_Wx_ALPHA_B_F(0x0); | 325 | VIDOSD_Wx_ALPHA_B_F(0x0); |
| @@ -355,8 +391,6 @@ static void decon_swreset(struct decon_context *ctx) | |||
| 355 | udelay(10); | 391 | udelay(10); |
| 356 | } | 392 | } |
| 357 | 393 | ||
| 358 | WARN(tries == 0, "failed to disable DECON\n"); | ||
| 359 | |||
| 360 | writel(VIDCON0_SWRESET, ctx->addr + DECON_VIDCON0); | 394 | writel(VIDCON0_SWRESET, ctx->addr + DECON_VIDCON0); |
| 361 | for (tries = 2000; tries; --tries) { | 395 | for (tries = 2000; tries; --tries) { |
| 362 | if (~readl(ctx->addr + DECON_VIDCON0) & VIDCON0_SWRESET) | 396 | if (~readl(ctx->addr + DECON_VIDCON0) & VIDCON0_SWRESET) |
| @@ -557,6 +591,13 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id) | |||
| 557 | 591 | ||
| 558 | if (val) { | 592 | if (val) { |
| 559 | writel(val, ctx->addr + DECON_VIDINTCON1); | 593 | writel(val, ctx->addr + DECON_VIDINTCON1); |
| 594 | if (ctx->out_type & IFTYPE_HDMI) { | ||
| 595 | val = readl(ctx->addr + DECON_VIDOUTCON0); | ||
| 596 | val &= VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F; | ||
| 597 | if (val == | ||
| 598 | (VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F)) | ||
| 599 | return IRQ_HANDLED; | ||
| 600 | } | ||
| 560 | drm_crtc_handle_vblank(&ctx->crtc->base); | 601 | drm_crtc_handle_vblank(&ctx->crtc->base); |
| 561 | } | 602 | } |
| 562 | 603 | ||
| @@ -637,6 +678,15 @@ static int exynos5433_decon_probe(struct platform_device *pdev) | |||
| 637 | ctx->out_type |= IFTYPE_I80; | 678 | ctx->out_type |= IFTYPE_I80; |
| 638 | } | 679 | } |
| 639 | 680 | ||
| 681 | if (ctx->out_type & I80_HW_TRG) { ||
| 682 | ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node, | ||
| 683 | "samsung,disp-sysreg"); | ||
| 684 | if (IS_ERR(ctx->sysreg)) { | ||
| 685 | dev_err(dev, "failed to get system register\n"); | ||
| 686 | return PTR_ERR(ctx->sysreg); | ||
| 687 | } | ||
| 688 | } | ||
| 689 | |||
| 640 | for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) { | 690 | for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) { |
| 641 | struct clk *clk; | 691 | struct clk *clk; |
| 642 | 692 | ||
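The interlace support added above programs per-field values: each field of an interlaced mode carries half the lines, so DECON_VIDTCON2's LINEVAL, the vertical back porch, and the OSD Y coordinates are all halved (the registers hold count-minus-one values). A worked example for a 1920x1080 interlaced mode, with lineval() as an illustrative helper rather than driver code:

    #include <stdio.h>

    /* LINEVAL as computed in decon_commit() above: lines per field
     * minus one for interlace, lines per frame minus one otherwise.
     */
    static unsigned int lineval(unsigned int vdisplay, int interlaced)
    {
            return (interlaced ? vdisplay / 2 : vdisplay) - 1;
    }

    int main(void)
    {
            printf("progressive: %u\n", lineval(1080, 0)); /* 1079 */
            printf("interlaced:  %u\n", lineval(1080, 1)); /* 539  */
            return 0;
    }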
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 745cfbdf6b39..a9fa444c6053 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c | |||
| @@ -125,10 +125,8 @@ static struct fimd_driver_data exynos3_fimd_driver_data = { | |||
| 125 | .timing_base = 0x20000, | 125 | .timing_base = 0x20000, |
| 126 | .lcdblk_offset = 0x210, | 126 | .lcdblk_offset = 0x210, |
| 127 | .lcdblk_bypass_shift = 1, | 127 | .lcdblk_bypass_shift = 1, |
| 128 | .trg_type = I80_HW_TRG, | ||
| 129 | .has_shadowcon = 1, | 128 | .has_shadowcon = 1, |
| 130 | .has_vidoutcon = 1, | 129 | .has_vidoutcon = 1, |
| 131 | .has_trigger_per_te = 1, | ||
| 132 | }; | 130 | }; |
| 133 | 131 | ||
| 134 | static struct fimd_driver_data exynos4_fimd_driver_data = { | 132 | static struct fimd_driver_data exynos4_fimd_driver_data = { |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 603d8425cca6..2b8bf2dd6387 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c | |||
| @@ -1683,7 +1683,7 @@ struct platform_driver g2d_driver = { | |||
| 1683 | .probe = g2d_probe, | 1683 | .probe = g2d_probe, |
| 1684 | .remove = g2d_remove, | 1684 | .remove = g2d_remove, |
| 1685 | .driver = { | 1685 | .driver = { |
| 1686 | .name = "s5p-g2d", | 1686 | .name = "exynos-drm-g2d", |
| 1687 | .owner = THIS_MODULE, | 1687 | .owner = THIS_MODULE, |
| 1688 | .pm = &g2d_pm_ops, | 1688 | .pm = &g2d_pm_ops, |
| 1689 | .of_match_table = exynos_g2d_match, | 1689 | .of_match_table = exynos_g2d_match, |
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 752e8a3afc79..0814ed76445c 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | #include <linux/io.h> | 35 | #include <linux/io.h> |
| 36 | #include <linux/of_address.h> | 36 | #include <linux/of_address.h> |
| 37 | #include <linux/of_device.h> | 37 | #include <linux/of_device.h> |
| 38 | #include <linux/of_graph.h> | ||
| 38 | #include <linux/hdmi.h> | 39 | #include <linux/hdmi.h> |
| 39 | #include <linux/component.h> | 40 | #include <linux/component.h> |
| 40 | #include <linux/mfd/syscon.h> | 41 | #include <linux/mfd/syscon.h> |
| @@ -132,6 +133,7 @@ struct hdmi_context { | |||
| 132 | struct regulator_bulk_data regul_bulk[ARRAY_SIZE(supply)]; | 133 | struct regulator_bulk_data regul_bulk[ARRAY_SIZE(supply)]; |
| 133 | struct regulator *reg_hdmi_en; | 134 | struct regulator *reg_hdmi_en; |
| 134 | struct exynos_drm_clk phy_clk; | 135 | struct exynos_drm_clk phy_clk; |
| 136 | struct drm_bridge *bridge; | ||
| 135 | }; | 137 | }; |
| 136 | 138 | ||
| 137 | static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e) | 139 | static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e) |
| @@ -508,9 +510,9 @@ static const struct hdmiphy_config hdmiphy_5433_configs[] = { | |||
| 508 | { | 510 | { |
| 509 | .pixel_clock = 27000000, | 511 | .pixel_clock = 27000000, |
| 510 | .conf = { | 512 | .conf = { |
| 511 | 0x01, 0x51, 0x22, 0x51, 0x08, 0xfc, 0x88, 0x46, | 513 | 0x01, 0x51, 0x2d, 0x75, 0x01, 0x00, 0x88, 0x02, |
| 512 | 0x72, 0x50, 0x24, 0x0c, 0x24, 0x0f, 0x7c, 0xa5, | 514 | 0x72, 0x50, 0x44, 0x8c, 0x27, 0x00, 0x7c, 0xac, |
| 513 | 0xd4, 0x2b, 0x87, 0x00, 0x00, 0x04, 0x00, 0x30, | 515 | 0xd6, 0x2b, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30, |
| 514 | 0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40, | 516 | 0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40, |
| 515 | }, | 517 | }, |
| 516 | }, | 518 | }, |
| @@ -518,9 +520,9 @@ static const struct hdmiphy_config hdmiphy_5433_configs[] = { | |||
| 518 | .pixel_clock = 27027000, | 520 | .pixel_clock = 27027000, |
| 519 | .conf = { | 521 | .conf = { |
| 520 | 0x01, 0x51, 0x2d, 0x72, 0x64, 0x09, 0x88, 0xc3, | 522 | 0x01, 0x51, 0x2d, 0x72, 0x64, 0x09, 0x88, 0xc3, |
| 521 | 0x71, 0x50, 0x24, 0x14, 0x24, 0x0f, 0x7c, 0xa5, | 523 | 0x71, 0x50, 0x44, 0x8c, 0x27, 0x00, 0x7c, 0xac, |
| 522 | 0xd4, 0x2b, 0x87, 0x00, 0x00, 0x04, 0x00, 0x30, | 524 | 0xd6, 0x2b, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30, |
| 523 | 0x28, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40, | 525 | 0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40, |
| 524 | }, | 526 | }, |
| 525 | }, | 527 | }, |
| 526 | { | 528 | { |
| @@ -586,6 +588,15 @@ static const struct hdmiphy_config hdmiphy_5433_configs[] = { | |||
| 586 | 0x08, 0x10, 0x01, 0x01, 0x48, 0x4a, 0x00, 0x40, | 588 | 0x08, 0x10, 0x01, 0x01, 0x48, 0x4a, 0x00, 0x40, |
| 587 | }, | 589 | }, |
| 588 | }, | 590 | }, |
| 591 | { | ||
| 592 | .pixel_clock = 297000000, | ||
| 593 | .conf = { | ||
| 594 | 0x01, 0x51, 0x3E, 0x05, 0x40, 0xF0, 0x88, 0xC2, | ||
| 595 | 0x52, 0x53, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC, | ||
| 596 | 0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30, | ||
| 597 | 0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40, | ||
| 598 | }, | ||
| 599 | }, | ||
| 589 | }; | 600 | }; |
| 590 | 601 | ||
| 591 | static const char * const hdmi_clk_gates4[] = { | 602 | static const char * const hdmi_clk_gates4[] = { |
| @@ -787,7 +798,8 @@ static void hdmi_reg_infoframes(struct hdmi_context *hdata) | |||
| 787 | sizeof(buf)); | 798 | sizeof(buf)); |
| 788 | if (ret > 0) { | 799 | if (ret > 0) { |
| 789 | hdmi_reg_writeb(hdata, HDMI_VSI_CON, HDMI_VSI_CON_EVERY_VSYNC); | 800 | hdmi_reg_writeb(hdata, HDMI_VSI_CON, HDMI_VSI_CON_EVERY_VSYNC); |
| 790 | hdmi_reg_write_buf(hdata, HDMI_VSI_HEADER0, buf, ret); | 801 | hdmi_reg_write_buf(hdata, HDMI_VSI_HEADER0, buf, 3); |
| 802 | hdmi_reg_write_buf(hdata, HDMI_VSI_DATA(0), buf + 3, ret - 3); | ||
| 791 | } | 803 | } |
| 792 | 804 | ||
| 793 | ret = hdmi_audio_infoframe_init(&frm.audio); | 805 | ret = hdmi_audio_infoframe_init(&frm.audio); |
| @@ -911,7 +923,15 @@ static int hdmi_create_connector(struct drm_encoder *encoder) | |||
| 911 | drm_connector_register(connector); | 923 | drm_connector_register(connector); |
| 912 | drm_mode_connector_attach_encoder(connector, encoder); | 924 | drm_mode_connector_attach_encoder(connector, encoder); |
| 913 | 925 | ||
| 914 | return 0; | 926 | if (hdata->bridge) { |
| 927 | encoder->bridge = hdata->bridge; | ||
| 928 | hdata->bridge->encoder = encoder; | ||
| 929 | ret = drm_bridge_attach(encoder, hdata->bridge, NULL); | ||
| 930 | if (ret) | ||
| 931 | DRM_ERROR("Failed to attach bridge\n"); | ||
| 932 | } | ||
| 933 | |||
| 934 | return ret; | ||
| 915 | } | 935 | } |
| 916 | 936 | ||
| 917 | static bool hdmi_mode_fixup(struct drm_encoder *encoder, | 937 | static bool hdmi_mode_fixup(struct drm_encoder *encoder, |
| @@ -1580,6 +1600,31 @@ static void hdmiphy_clk_enable(struct exynos_drm_clk *clk, bool enable) | |||
| 1580 | hdmiphy_disable(hdata); | 1600 | hdmiphy_disable(hdata); |
| 1581 | } | 1601 | } |
| 1582 | 1602 | ||
| 1603 | static int hdmi_bridge_init(struct hdmi_context *hdata) | ||
| 1604 | { | ||
| 1605 | struct device *dev = hdata->dev; | ||
| 1606 | struct device_node *ep, *np; | ||
| 1607 | |||
| 1608 | ep = of_graph_get_endpoint_by_regs(dev->of_node, 1, -1); | ||
| 1609 | if (!ep) | ||
| 1610 | return 0; | ||
| 1611 | |||
| 1612 | np = of_graph_get_remote_port_parent(ep); | ||
| 1613 | of_node_put(ep); | ||
| 1614 | if (!np) { | ||
| 1615 | DRM_ERROR("failed to get remote port parent"); | ||
| 1616 | return -EINVAL; | ||
| 1617 | } | ||
| 1618 | |||
| 1619 | hdata->bridge = of_drm_find_bridge(np); | ||
| 1620 | of_node_put(np); | ||
| 1621 | |||
| 1622 | if (!hdata->bridge) | ||
| 1623 | return -EPROBE_DEFER; | ||
| 1624 | |||
| 1625 | return 0; | ||
| 1626 | } | ||
| 1627 | |||
| 1583 | static int hdmi_resources_init(struct hdmi_context *hdata) | 1628 | static int hdmi_resources_init(struct hdmi_context *hdata) |
| 1584 | { | 1629 | { |
| 1585 | struct device *dev = hdata->dev; | 1630 | struct device *dev = hdata->dev; |
| @@ -1619,17 +1664,18 @@ static int hdmi_resources_init(struct hdmi_context *hdata) | |||
| 1619 | 1664 | ||
| 1620 | hdata->reg_hdmi_en = devm_regulator_get_optional(dev, "hdmi-en"); | 1665 | hdata->reg_hdmi_en = devm_regulator_get_optional(dev, "hdmi-en"); |
| 1621 | 1666 | ||
| 1622 | if (PTR_ERR(hdata->reg_hdmi_en) == -ENODEV) | 1667 | if (PTR_ERR(hdata->reg_hdmi_en) != -ENODEV) { |
| 1623 | return 0; | 1668 | if (IS_ERR(hdata->reg_hdmi_en)) |
| 1669 | return PTR_ERR(hdata->reg_hdmi_en); | ||
| 1624 | 1670 | ||
| 1625 | if (IS_ERR(hdata->reg_hdmi_en)) | 1671 | ret = regulator_enable(hdata->reg_hdmi_en); |
| 1626 | return PTR_ERR(hdata->reg_hdmi_en); | 1672 | if (ret) { |
| 1627 | 1673 | DRM_ERROR("failed to enable hdmi-en regulator\n"); | |
| 1628 | ret = regulator_enable(hdata->reg_hdmi_en); | 1674 | return ret; |
| 1629 | if (ret) | 1675 | } |
| 1630 | DRM_ERROR("failed to enable hdmi-en regulator\n"); | 1676 | } |
| 1631 | 1677 | ||
| 1632 | return ret; | 1678 | return hdmi_bridge_init(hdata); |
| 1633 | } | 1679 | } |
| 1634 | 1680 | ||
| 1635 | static struct of_device_id hdmi_match_types[] = { | 1681 | static struct of_device_id hdmi_match_types[] = { |
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c index 3194e544ee27..b3d70a63c5a3 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c | |||
| @@ -72,10 +72,8 @@ struct fsl_tcon *fsl_tcon_init(struct device *dev) | |||
| 72 | return NULL; | 72 | return NULL; |
| 73 | 73 | ||
| 74 | tcon = devm_kzalloc(dev, sizeof(*tcon), GFP_KERNEL); | 74 | tcon = devm_kzalloc(dev, sizeof(*tcon), GFP_KERNEL); |
| 75 | if (!tcon) { | 75 | if (!tcon) |
| 76 | ret = -ENOMEM; | ||
| 77 | goto err_node_put; | 76 | goto err_node_put; |
| 78 | } | ||
| 79 | 77 | ||
| 80 | ret = fsl_tcon_init_regmap(dev, tcon, np); | 78 | ret = fsl_tcon_init_regmap(dev, tcon, np); |
| 81 | if (ret) { | 79 | if (ret) { |
| @@ -89,9 +87,13 @@ struct fsl_tcon *fsl_tcon_init(struct device *dev) | |||
| 89 | goto err_node_put; | 87 | goto err_node_put; |
| 90 | } | 88 | } |
| 91 | 89 | ||
| 92 | of_node_put(np); | 90 | ret = clk_prepare_enable(tcon->ipg_clk); |
| 93 | clk_prepare_enable(tcon->ipg_clk); | 91 | if (ret) { |
| 92 | dev_err(dev, "Couldn't enable the TCON clock\n"); | ||
| 93 | goto err_node_put; | ||
| 94 | } | ||
| 94 | 95 | ||
| 96 | of_node_put(np); | ||
| 95 | dev_info(dev, "Using TCON in bypass mode\n"); | 97 | dev_info(dev, "Using TCON in bypass mode\n"); |
| 96 | 98 | ||
| 97 | return tcon; | 99 | return tcon; |
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 7311aeab16f7..3b6caaca9751 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c | |||
| @@ -49,20 +49,21 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) | |||
| 49 | if (high_gm) { | 49 | if (high_gm) { |
| 50 | node = &vgpu->gm.high_gm_node; | 50 | node = &vgpu->gm.high_gm_node; |
| 51 | size = vgpu_hidden_sz(vgpu); | 51 | size = vgpu_hidden_sz(vgpu); |
| 52 | start = gvt_hidden_gmadr_base(gvt); | 52 | start = ALIGN(gvt_hidden_gmadr_base(gvt), I915_GTT_PAGE_SIZE); |
| 53 | end = gvt_hidden_gmadr_end(gvt); | 53 | end = ALIGN(gvt_hidden_gmadr_end(gvt), I915_GTT_PAGE_SIZE); |
| 54 | flags = PIN_HIGH; | 54 | flags = PIN_HIGH; |
| 55 | } else { | 55 | } else { |
| 56 | node = &vgpu->gm.low_gm_node; | 56 | node = &vgpu->gm.low_gm_node; |
| 57 | size = vgpu_aperture_sz(vgpu); | 57 | size = vgpu_aperture_sz(vgpu); |
| 58 | start = gvt_aperture_gmadr_base(gvt); | 58 | start = ALIGN(gvt_aperture_gmadr_base(gvt), I915_GTT_PAGE_SIZE); |
| 59 | end = gvt_aperture_gmadr_end(gvt); | 59 | end = ALIGN(gvt_aperture_gmadr_end(gvt), I915_GTT_PAGE_SIZE); |
| 60 | flags = PIN_MAPPABLE; | 60 | flags = PIN_MAPPABLE; |
| 61 | } | 61 | } |
| 62 | 62 | ||
| 63 | mutex_lock(&dev_priv->drm.struct_mutex); | 63 | mutex_lock(&dev_priv->drm.struct_mutex); |
| 64 | ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node, | 64 | ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node, |
| 65 | size, 4096, I915_COLOR_UNEVICTABLE, | 65 | size, I915_GTT_PAGE_SIZE, |
| 66 | I915_COLOR_UNEVICTABLE, | ||
| 66 | start, end, flags); | 67 | start, end, flags); |
| 67 | mutex_unlock(&dev_priv->drm.struct_mutex); | 68 | mutex_unlock(&dev_priv->drm.struct_mutex); |
| 68 | if (ret) | 69 | if (ret) |
| @@ -254,7 +255,7 @@ static int alloc_resource(struct intel_vgpu *vgpu, | |||
| 254 | if (request > avail) | 255 | if (request > avail) |
| 255 | goto no_enough_resource; | 256 | goto no_enough_resource; |
| 256 | 257 | ||
| 257 | vgpu_aperture_sz(vgpu) = request; | 258 | vgpu_aperture_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE); |
| 258 | 259 | ||
| 259 | item = "high GM space"; | 260 | item = "high GM space"; |
| 260 | max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE; | 261 | max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE; |
| @@ -265,7 +266,7 @@ static int alloc_resource(struct intel_vgpu *vgpu, | |||
| 265 | if (request > avail) | 266 | if (request > avail) |
| 266 | goto no_enough_resource; | 267 | goto no_enough_resource; |
| 267 | 268 | ||
| 268 | vgpu_hidden_sz(vgpu) = request; | 269 | vgpu_hidden_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE); |
| 269 | 270 | ||
| 270 | item = "fence"; | 271 | item = "fence"; |
| 271 | max = gvt_fence_sz(gvt) - HOST_FENCE; | 272 | max = gvt_fence_sz(gvt) - HOST_FENCE; |
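ALIGN() here rounds up to the next multiple of I915_GTT_PAGE_SIZE (4 KiB at the time of this series), so the vGPU's aperture and hidden GM base, end, and size all become whole GTT pages. The macro's arithmetic for a power-of-two boundary, shown standalone (SKETCH_ALIGN mirrors the kernel's ALIGN()):

    #include <stdio.h>

    /* Round x up to a multiple of power-of-two a: add a-1, then
     * clear the low bits -- the same trick the kernel's ALIGN() uses.
     */
    #define SKETCH_ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    int main(void)
    {
            printf("%lu %lu %lu\n",
                   SKETCH_ALIGN(1UL, 4096),      /* 4096 */
                   SKETCH_ALIGN(4096UL, 4096),   /* 4096 */
                   SKETCH_ALIGN(4097UL, 4096));  /* 8192 */
            return 0;
    }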
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 9a4b23c3ee97..b9c8e2407682 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c | |||
| @@ -481,7 +481,6 @@ struct parser_exec_state { | |||
| 481 | (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2) | 481 | (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2) |
| 482 | 482 | ||
| 483 | static unsigned long bypass_scan_mask = 0; | 483 | static unsigned long bypass_scan_mask = 0; |
| 484 | static bool bypass_batch_buffer_scan = true; | ||
| 485 | 484 | ||
| 486 | /* ring ALL, type = 0 */ | 485 | /* ring ALL, type = 0 */ |
| 487 | static struct sub_op_bits sub_op_mi[] = { | 486 | static struct sub_op_bits sub_op_mi[] = { |
| @@ -1135,6 +1134,8 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s, | |||
| 1135 | u32 dword2 = cmd_val(s, 2); | 1134 | u32 dword2 = cmd_val(s, 2); |
| 1136 | u32 plane = (dword0 & GENMASK(12, 8)) >> 8; | 1135 | u32 plane = (dword0 & GENMASK(12, 8)) >> 8; |
| 1137 | 1136 | ||
| 1137 | info->plane = PRIMARY_PLANE; | ||
| 1138 | |||
| 1138 | switch (plane) { | 1139 | switch (plane) { |
| 1139 | case MI_DISPLAY_FLIP_SKL_PLANE_1_A: | 1140 | case MI_DISPLAY_FLIP_SKL_PLANE_1_A: |
| 1140 | info->pipe = PIPE_A; | 1141 | info->pipe = PIPE_A; |
| @@ -1148,12 +1149,28 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s, | |||
| 1148 | info->pipe = PIPE_C; | 1149 | info->pipe = PIPE_C; |
| 1149 | info->event = PRIMARY_C_FLIP_DONE; | 1150 | info->event = PRIMARY_C_FLIP_DONE; |
| 1150 | break; | 1151 | break; |
| 1152 | |||
| 1153 | case MI_DISPLAY_FLIP_SKL_PLANE_2_A: | ||
| 1154 | info->pipe = PIPE_A; | ||
| 1155 | info->event = SPRITE_A_FLIP_DONE; | ||
| 1156 | info->plane = SPRITE_PLANE; | ||
| 1157 | break; | ||
| 1158 | case MI_DISPLAY_FLIP_SKL_PLANE_2_B: | ||
| 1159 | info->pipe = PIPE_B; | ||
| 1160 | info->event = SPRITE_B_FLIP_DONE; | ||
| 1161 | info->plane = SPRITE_PLANE; | ||
| 1162 | break; | ||
| 1163 | case MI_DISPLAY_FLIP_SKL_PLANE_2_C: | ||
| 1164 | info->pipe = PIPE_C; | ||
| 1165 | info->event = SPRITE_C_FLIP_DONE; | ||
| 1166 | info->plane = SPRITE_PLANE; | ||
| 1167 | break; | ||
| 1168 | |||
| 1151 | default: | 1169 | default: |
| 1152 | gvt_err("unknown plane code %d\n", plane); | 1170 | gvt_err("unknown plane code %d\n", plane); |
| 1153 | return -EINVAL; | 1171 | return -EINVAL; |
| 1154 | } | 1172 | } |
| 1155 | 1173 | ||
| 1156 | info->pipe = PRIMARY_PLANE; | ||
| 1157 | info->stride_val = (dword1 & GENMASK(15, 6)) >> 6; | 1174 | info->stride_val = (dword1 & GENMASK(15, 6)) >> 6; |
| 1158 | info->tile_val = (dword1 & GENMASK(2, 0)); | 1175 | info->tile_val = (dword1 & GENMASK(2, 0)); |
| 1159 | info->surf_val = (dword2 & GENMASK(31, 12)) >> 12; | 1176 | info->surf_val = (dword2 & GENMASK(31, 12)) >> 12; |
| @@ -1525,9 +1542,6 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s) | |||
| 1525 | { | 1542 | { |
| 1526 | struct intel_gvt *gvt = s->vgpu->gvt; | 1543 | struct intel_gvt *gvt = s->vgpu->gvt; |
| 1527 | 1544 | ||
| 1528 | if (bypass_batch_buffer_scan) | ||
| 1529 | return 0; | ||
| 1530 | |||
| 1531 | if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) { | 1545 | if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) { |
| 1532 | /* BDW decides privilege based on address space */ | 1546 | /* BDW decides privilege based on address space */ |
| 1533 | if (cmd_val(s, 0) & (1 << 8)) | 1547 | if (cmd_val(s, 0) & (1 << 8)) |
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index c0c884aeb30e..6d8fde880c39 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c | |||
| @@ -83,7 +83,7 @@ static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe) | |||
| 83 | return 0; | 83 | return 0; |
| 84 | } | 84 | } |
| 85 | 85 | ||
| 86 | /* EDID with 1024x768 as its resolution */ | 86 | /* EDID with 1920x1200 as its resolution */ |
| 87 | static unsigned char virtual_dp_monitor_edid[] = { | 87 | static unsigned char virtual_dp_monitor_edid[] = { |
| 88 | /*Header*/ | 88 | /*Header*/ |
| 89 | 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, | 89 | 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, |
| @@ -97,11 +97,16 @@ static unsigned char virtual_dp_monitor_edid[] = { | |||
| 97 | 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54, | 97 | 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54, |
| 98 | /* Established Timings: maximum resolution is 1024x768 */ | 98 | /* Established Timings: maximum resolution is 1024x768 */ |
| 99 | 0x21, 0x08, 0x00, | 99 | 0x21, 0x08, 0x00, |
| 100 | /* Standard Timings. All invalid */ | 100 | /* |
| 101 | 0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00, | 101 | * Standard Timings. |
| 102 | 0x00, 0x40, 0x00, 0x00, 0x00, 0x01, | 102 | * the following new resolutions can be supported: |
| 103 | /* 18 Byte Data Blocks 1: invalid */ | 103 | * 1920x1080, 1280x720, 1280x960, 1280x1024, |
| 104 | 0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0, | 104 | * 1440x900, 1600x1200, 1680x1050 |
| 105 | */ | ||
| 106 | 0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00, | ||
| 107 | 0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01, | ||
| 108 | /* 18 Byte Data Blocks 1: max resolution is 1920x1200 */ | ||
| 109 | 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0, | ||
| 105 | 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a, | 110 | 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a, |
| 106 | /* 18 Byte Data Blocks 2: invalid */ | 111 | /* 18 Byte Data Blocks 2: invalid */ |
| 107 | 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a, | 112 | 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a, |
| @@ -115,7 +120,7 @@ static unsigned char virtual_dp_monitor_edid[] = { | |||
| 115 | /* Extension Block Count */ | 120 | /* Extension Block Count */ |
| 116 | 0x00, | 121 | 0x00, |
| 117 | /* Checksum */ | 122 | /* Checksum */ |
| 118 | 0xef, | 123 | 0x45, |
| 119 | }; | 124 | }; |
| 120 | 125 | ||
| 121 | #define DPCD_HEADER_SIZE 0xb | 126 | #define DPCD_HEADER_SIZE 0xb |
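Each standard-timing entry in the EDID block above packs a mode into two bytes: horizontal pixels are (byte0 + 31) * 8, the top two bits of byte1 pick the aspect ratio (00 = 16:10 in EDID 1.3+, 01 = 4:3, 10 = 5:4, 11 = 16:9), and its low six bits store the refresh rate minus 60. A small decoder for the seven new entries, which reproduces exactly the resolutions named in the comment:

    #include <stdio.h>

    /* Decode one EDID 1.3+ standard-timing descriptor (two bytes). */
    static void decode_std_timing(unsigned char b0, unsigned char b1)
    {
            static const int num[4] = { 10, 3, 4, 9 };
            static const int den[4] = { 16, 4, 5, 16 };
            int h, v, ar, hz;

            if (b0 == 0x01 && b1 == 0x01)
                    return;              /* 0x0101 marks an unused slot */
            h = (b0 + 31) * 8;
            ar = b1 >> 6;
            v = h * num[ar] / den[ar];
            hz = (b1 & 0x3f) + 60;
            printf("%dx%d@%d\n", h, v, hz);
    }

    int main(void)
    {
            /* The entries from the virtual DP monitor EDID above */
            unsigned char st[] = { 0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40,
                                   0x81, 0x80, 0x95, 0x00, 0xa9, 0x40,
                                   0xb3, 0x00, 0x01, 0x01 };
            for (unsigned int i = 0; i < sizeof(st); i += 2)
                    decode_std_timing(st[i], st[i + 1]);
            return 0;
    }

Run against the bytes above, this prints 1920x1080@60 through 1680x1050@60, matching the list in the new comment.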
| @@ -328,3 +333,15 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu) | |||
| 328 | else | 333 | else |
| 329 | return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B); | 334 | return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B); |
| 330 | } | 335 | } |
| 336 | |||
| 337 | /** | ||
| 338 | * intel_vgpu_reset_display - reset vGPU virtual display emulation | ||
| 339 | * @vgpu: a vGPU | ||
| 340 | * | ||
| 341 | * This function is used to reset the vGPU's virtual display emulation state | ||
| 342 | * | ||
| 343 | */ | ||
| 344 | void intel_vgpu_reset_display(struct intel_vgpu *vgpu) | ||
| 345 | { | ||
| 346 | emulate_monitor_status_change(vgpu); | ||
| 347 | } | ||
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h index 7a60cb848268..8b234ea961f6 100644 --- a/drivers/gpu/drm/i915/gvt/display.h +++ b/drivers/gpu/drm/i915/gvt/display.h | |||
| @@ -158,6 +158,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt); | |||
| 158 | void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt); | 158 | void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt); |
| 159 | 159 | ||
| 160 | int intel_vgpu_init_display(struct intel_vgpu *vgpu); | 160 | int intel_vgpu_init_display(struct intel_vgpu *vgpu); |
| 161 | void intel_vgpu_reset_display(struct intel_vgpu *vgpu); | ||
| 161 | void intel_vgpu_clean_display(struct intel_vgpu *vgpu); | 162 | void intel_vgpu_clean_display(struct intel_vgpu *vgpu); |
| 162 | 163 | ||
| 163 | #endif | 164 | #endif |
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index f32bb6f6495c..46eb9fd3c03f 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c | |||
| @@ -364,58 +364,30 @@ static void free_workload(struct intel_vgpu_workload *workload) | |||
| 364 | #define get_desc_from_elsp_dwords(ed, i) \ | 364 | #define get_desc_from_elsp_dwords(ed, i) \ |
| 365 | ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2])) | 365 | ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2])) |
| 366 | 366 | ||
| 367 | |||
| 368 | #define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2)) | ||
| 369 | #define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U)) | ||
| 370 | static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj, | ||
| 371 | unsigned long add, int gmadr_bytes) | ||
| 372 | { | ||
| 373 | if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8)) | ||
| 374 | return -1; | ||
| 375 | |||
| 376 | *((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add & | ||
| 377 | BATCH_BUFFER_ADDR_MASK; | ||
| 378 | if (gmadr_bytes == 8) { | ||
| 379 | *((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) = | ||
| 380 | add & BATCH_BUFFER_ADDR_HIGH_MASK; | ||
| 381 | } | ||
| 382 | |||
| 383 | return 0; | ||
| 384 | } | ||
| 385 | |||
| 386 | static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) | 367 | static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) |
| 387 | { | 368 | { |
| 388 | int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd; | 369 | const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd; |
| 370 | struct intel_shadow_bb_entry *entry_obj; | ||
| 389 | 371 | ||
| 390 | /* pin the gem object to ggtt */ | 372 | /* pin the gem object to ggtt */ |
| 391 | if (!list_empty(&workload->shadow_bb)) { | 373 | list_for_each_entry(entry_obj, &workload->shadow_bb, list) { |
| 392 | struct intel_shadow_bb_entry *entry_obj = | 374 | struct i915_vma *vma; |
| 393 | list_first_entry(&workload->shadow_bb, | ||
| 394 | struct intel_shadow_bb_entry, | ||
| 395 | list); | ||
| 396 | struct intel_shadow_bb_entry *temp; | ||
| 397 | 375 | ||
| 398 | list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb, | 376 | vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0); |
| 399 | list) { | 377 | if (IS_ERR(vma)) { |
| 400 | struct i915_vma *vma; | 378 | gvt_err("Cannot pin\n"); |
| 401 | 379 | return; | |
| 402 | vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, | ||
| 403 | 4, 0); | ||
| 404 | if (IS_ERR(vma)) { | ||
| 405 | gvt_err("Cannot pin\n"); | ||
| 406 | return; | ||
| 407 | } | ||
| 408 | |||
| 409 | /* FIXME: we are not tracking our pinned VMA leaving it | ||
| 410 | * up to the core to fix up the stray pin_count upon | ||
| 411 | * free. | ||
| 412 | */ | ||
| 413 | |||
| 414 | /* update the relocate gma with shadow batch buffer*/ | ||
| 415 | set_gma_to_bb_cmd(entry_obj, | ||
| 416 | i915_ggtt_offset(vma), | ||
| 417 | gmadr_bytes); | ||
| 418 | } | 380 | } |
| 381 | |||
| 382 | /* FIXME: we are not tracking our pinned VMA leaving it | ||
| 383 | * up to the core to fix up the stray pin_count upon | ||
| 384 | * free. | ||
| 385 | */ | ||
| 386 | |||
| 387 | /* update the relocate gma with shadow batch buffer*/ | ||
| 388 | entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma); | ||
| 389 | if (gmadr_bytes == 8) | ||
| 390 | entry_obj->bb_start_cmd_va[2] = 0; | ||
| 419 | } | 391 | } |
| 420 | } | 392 | } |
| 421 | 393 | ||
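The simplified loop above drops the set_gma_to_bb_cmd() helper and patches each shadowed MI_BATCH_BUFFER_START in place through bb_start_cmd_va, treated as a dword array: index 0 is the command header, index 1 the low 32 bits of the graphics address, and index 2 (present only when the command carries 8 address bytes) the high bits, which the new code clears. A sketch of that relocation with illustrative names:

    #include <stdint.h>

    /* Point a shadowed MI_BATCH_BUFFER_START at the shadow buffer.
     * cmd_va plays the role of entry_obj->bb_start_cmd_va and
     * ggtt_offset that of i915_ggtt_offset(vma).
     */
    static void relocate_bb_start(uint32_t *cmd_va, uint32_t ggtt_offset,
                                  int gmadr_bytes)
    {
            cmd_va[1] = ggtt_offset;    /* low address dword */
            if (gmadr_bytes == 8)
                    cmd_va[2] = 0;      /* high dword: the shadow BB is
                                         * assumed to sit in the low
                                         * 4 GiB of the GGTT */
    }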
| @@ -515,7 +487,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) | |||
| 515 | 487 | ||
| 516 | static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) | 488 | static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) |
| 517 | { | 489 | { |
| 518 | if (wa_ctx->indirect_ctx.size == 0) | 490 | if (!wa_ctx->indirect_ctx.obj) |
| 519 | return; | 491 | return; |
| 520 | 492 | ||
| 521 | i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj); | 493 | i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj); |
| @@ -826,7 +798,7 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu) | |||
| 826 | INIT_LIST_HEAD(&vgpu->workload_q_head[i]); | 798 | INIT_LIST_HEAD(&vgpu->workload_q_head[i]); |
| 827 | } | 799 | } |
| 828 | 800 | ||
| 829 | vgpu->workloads = kmem_cache_create("gvt-g vgpu workload", | 801 | vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload", |
| 830 | sizeof(struct intel_vgpu_workload), 0, | 802 | sizeof(struct intel_vgpu_workload), 0, |
| 831 | SLAB_HWCACHE_ALIGN, | 803 | SLAB_HWCACHE_ALIGN, |
| 832 | NULL); | 804 | NULL); |
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c index 2fae2a2ca96f..1cb29b2d7dc6 100644 --- a/drivers/gpu/drm/i915/gvt/firmware.c +++ b/drivers/gpu/drm/i915/gvt/firmware.c | |||
| @@ -48,31 +48,6 @@ struct gvt_firmware_header { | |||
| 48 | unsigned char data[1]; | 48 | unsigned char data[1]; |
| 49 | }; | 49 | }; |
| 50 | 50 | ||
| 51 | #define RD(offset) (readl(mmio + offset.reg)) | ||
| 52 | #define WR(v, offset) (writel(v, mmio + offset.reg)) | ||
| 53 | |||
| 54 | static void bdw_forcewake_get(void __iomem *mmio) | ||
| 55 | { | ||
| 56 | WR(_MASKED_BIT_DISABLE(0xffff), FORCEWAKE_MT); | ||
| 57 | |||
| 58 | RD(ECOBUS); | ||
| 59 | |||
| 60 | if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL) == 0, 50)) | ||
| 61 | gvt_err("fail to wait forcewake idle\n"); | ||
| 62 | |||
| 63 | WR(_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL), FORCEWAKE_MT); | ||
| 64 | |||
| 65 | if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL), 50)) | ||
| 66 | gvt_err("fail to wait forcewake ack\n"); | ||
| 67 | |||
| 68 | if (wait_for((RD(GEN6_GT_THREAD_STATUS_REG) & | ||
| 69 | GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 50)) | ||
| 70 | gvt_err("fail to wait c0 wake up\n"); | ||
| 71 | } | ||
| 72 | |||
| 73 | #undef RD | ||
| 74 | #undef WR | ||
| 75 | |||
| 76 | #define dev_to_drm_minor(d) dev_get_drvdata((d)) | 51 | #define dev_to_drm_minor(d) dev_get_drvdata((d)) |
| 77 | 52 | ||
| 78 | static ssize_t | 53 | static ssize_t |
| @@ -91,9 +66,9 @@ static struct bin_attribute firmware_attr = { | |||
| 91 | .mmap = NULL, | 66 | .mmap = NULL, |
| 92 | }; | 67 | }; |
| 93 | 68 | ||
| 94 | static int expose_firmware_sysfs(struct intel_gvt *gvt, | 69 | static int expose_firmware_sysfs(struct intel_gvt *gvt) |
| 95 | void __iomem *mmio) | ||
| 96 | { | 70 | { |
| 71 | struct drm_i915_private *dev_priv = gvt->dev_priv; | ||
| 97 | struct intel_gvt_device_info *info = &gvt->device_info; | 72 | struct intel_gvt_device_info *info = &gvt->device_info; |
| 98 | struct pci_dev *pdev = gvt->dev_priv->drm.pdev; | 73 | struct pci_dev *pdev = gvt->dev_priv->drm.pdev; |
| 99 | struct intel_gvt_mmio_info *e; | 74 | struct intel_gvt_mmio_info *e; |
| @@ -132,7 +107,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt, | |||
| 132 | 107 | ||
| 133 | for (j = 0; j < e->length; j += 4) | 108 | for (j = 0; j < e->length; j += 4) |
| 134 | *(u32 *)(p + e->offset + j) = | 109 | *(u32 *)(p + e->offset + j) = |
| 135 | readl(mmio + e->offset + j); | 110 | I915_READ_NOTRACE(_MMIO(e->offset + j)); |
| 136 | } | 111 | } |
| 137 | 112 | ||
| 138 | memcpy(gvt->firmware.mmio, p, info->mmio_size); | 113 | memcpy(gvt->firmware.mmio, p, info->mmio_size); |
| @@ -235,7 +210,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt) | |||
| 235 | struct gvt_firmware_header *h; | 210 | struct gvt_firmware_header *h; |
| 236 | const struct firmware *fw; | 211 | const struct firmware *fw; |
| 237 | char *path; | 212 | char *path; |
| 238 | void __iomem *mmio; | ||
| 239 | void *mem; | 213 | void *mem; |
| 240 | int ret; | 214 | int ret; |
| 241 | 215 | ||
| @@ -260,17 +234,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt) | |||
| 260 | 234 | ||
| 261 | firmware->mmio = mem; | 235 | firmware->mmio = mem; |
| 262 | 236 | ||
| 263 | mmio = pci_iomap(pdev, info->mmio_bar, info->mmio_size); | ||
| 264 | if (!mmio) { | ||
| 265 | kfree(path); | ||
| 266 | kfree(firmware->cfg_space); | ||
| 267 | kfree(firmware->mmio); | ||
| 268 | return -EINVAL; | ||
| 269 | } | ||
| 270 | |||
| 271 | if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) | ||
| 272 | bdw_forcewake_get(mmio); | ||
| 273 | |||
| 274 | sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state", | 237 | sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state", |
| 275 | GVT_FIRMWARE_PATH, pdev->vendor, pdev->device, | 238 | GVT_FIRMWARE_PATH, pdev->vendor, pdev->device, |
| 276 | pdev->revision); | 239 | pdev->revision); |
| @@ -300,13 +263,11 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt) | |||
| 300 | 263 | ||
| 301 | release_firmware(fw); | 264 | release_firmware(fw); |
| 302 | firmware->firmware_loaded = true; | 265 | firmware->firmware_loaded = true; |
| 303 | pci_iounmap(pdev, mmio); | ||
| 304 | return 0; | 266 | return 0; |
| 305 | 267 | ||
| 306 | out_free_fw: | 268 | out_free_fw: |
| 307 | release_firmware(fw); | 269 | release_firmware(fw); |
| 308 | expose_firmware: | 270 | expose_firmware: |
| 309 | expose_firmware_sysfs(gvt, mmio); | 271 | expose_firmware_sysfs(gvt); |
| 310 | pci_iounmap(pdev, mmio); | ||
| 311 | return 0; | 272 | return 0; |
| 312 | } | 273 | } |
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 47dec4acf7ff..28c92346db0e 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c | |||
| @@ -606,21 +606,33 @@ struct intel_vgpu_guest_page *intel_vgpu_find_guest_page( | |||
| 606 | static inline int init_shadow_page(struct intel_vgpu *vgpu, | 606 | static inline int init_shadow_page(struct intel_vgpu *vgpu, |
| 607 | struct intel_vgpu_shadow_page *p, int type) | 607 | struct intel_vgpu_shadow_page *p, int type) |
| 608 | { | 608 | { |
| 609 | struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev; | ||
| 610 | dma_addr_t daddr; | ||
| 611 | |||
| 612 | daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL); | ||
| 613 | if (dma_mapping_error(kdev, daddr)) { | ||
| 614 | gvt_err("fail to map dma addr\n"); | ||
| 615 | return -EINVAL; | ||
| 616 | } | ||
| 617 | |||
| 609 | p->vaddr = page_address(p->page); | 618 | p->vaddr = page_address(p->page); |
| 610 | p->type = type; | 619 | p->type = type; |
| 611 | 620 | ||
| 612 | INIT_HLIST_NODE(&p->node); | 621 | INIT_HLIST_NODE(&p->node); |
| 613 | 622 | ||
| 614 | p->mfn = intel_gvt_hypervisor_virt_to_mfn(p->vaddr); | 623 | p->mfn = daddr >> GTT_PAGE_SHIFT; |
| 615 | if (p->mfn == INTEL_GVT_INVALID_ADDR) | ||
| 616 | return -EFAULT; | ||
| 617 | |||
| 618 | hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn); | 624 | hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn); |
| 619 | return 0; | 625 | return 0; |
| 620 | } | 626 | } |
| 621 | 627 | ||
| 622 | static inline void clean_shadow_page(struct intel_vgpu_shadow_page *p) | 628 | static inline void clean_shadow_page(struct intel_vgpu *vgpu, |
| 629 | struct intel_vgpu_shadow_page *p) | ||
| 623 | { | 630 | { |
| 631 | struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev; | ||
| 632 | |||
| 633 | dma_unmap_page(kdev, p->mfn << GTT_PAGE_SHIFT, 4096, | ||
| 634 | PCI_DMA_BIDIRECTIONAL); | ||
| 635 | |||
| 624 | if (!hlist_unhashed(&p->node)) | 636 | if (!hlist_unhashed(&p->node)) |
| 625 | hash_del(&p->node); | 637 | hash_del(&p->node); |
| 626 | } | 638 | } |
| @@ -670,7 +682,7 @@ static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt) | |||
| 670 | { | 682 | { |
| 671 | trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type); | 683 | trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type); |
| 672 | 684 | ||
| 673 | clean_shadow_page(&spt->shadow_page); | 685 | clean_shadow_page(spt->vgpu, &spt->shadow_page); |
| 674 | intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page); | 686 | intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page); |
| 675 | list_del_init(&spt->post_shadow_list); | 687 | list_del_init(&spt->post_shadow_list); |
| 676 | 688 | ||
| @@ -1875,8 +1887,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, | |||
| 1875 | int page_entry_num = GTT_PAGE_SIZE >> | 1887 | int page_entry_num = GTT_PAGE_SIZE >> |
| 1876 | vgpu->gvt->device_info.gtt_entry_size_shift; | 1888 | vgpu->gvt->device_info.gtt_entry_size_shift; |
| 1877 | void *scratch_pt; | 1889 | void *scratch_pt; |
| 1878 | unsigned long mfn; | ||
| 1879 | int i; | 1890 | int i; |
| 1891 | struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; | ||
| 1892 | dma_addr_t daddr; | ||
| 1880 | 1893 | ||
| 1881 | if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) | 1894 | if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) |
| 1882 | return -EINVAL; | 1895 | return -EINVAL; |
| @@ -1887,16 +1900,18 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, | |||
| 1887 | return -ENOMEM; | 1900 | return -ENOMEM; |
| 1888 | } | 1901 | } |
| 1889 | 1902 | ||
| 1890 | mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt); | 1903 | daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, |
| 1891 | if (mfn == INTEL_GVT_INVALID_ADDR) { | 1904 | 4096, PCI_DMA_BIDIRECTIONAL); |
| 1892 | gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt); | 1905 | if (dma_mapping_error(dev, daddr)) { |
| 1893 | free_page((unsigned long)scratch_pt); | 1906 | gvt_err("fail to dmamap scratch_pt\n"); |
| 1894 | return -EFAULT; | 1907 | __free_page(virt_to_page(scratch_pt)); |
| 1908 | return -ENOMEM; | ||
| 1895 | } | 1909 | } |
| 1896 | gtt->scratch_pt[type].page_mfn = mfn; | 1910 | gtt->scratch_pt[type].page_mfn = |
| 1911 | (unsigned long)(daddr >> GTT_PAGE_SHIFT); | ||
| 1897 | gtt->scratch_pt[type].page = virt_to_page(scratch_pt); | 1912 | gtt->scratch_pt[type].page = virt_to_page(scratch_pt); |
| 1898 | gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", | 1913 | gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", |
| 1899 | vgpu->id, type, mfn); | 1914 | vgpu->id, type, gtt->scratch_pt[type].page_mfn); |
| 1900 | 1915 | ||
| 1901 | /* Build the tree by full filled the scratch pt with the entries which | 1916 | /* Build the tree by full filled the scratch pt with the entries which |
| 1902 | * point to the next level scratch pt or scratch page. The | 1917 | * point to the next level scratch pt or scratch page. The |
| @@ -1930,9 +1945,14 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, | |||
| 1930 | static int release_scratch_page_tree(struct intel_vgpu *vgpu) | 1945 | static int release_scratch_page_tree(struct intel_vgpu *vgpu) |
| 1931 | { | 1946 | { |
| 1932 | int i; | 1947 | int i; |
| 1948 | struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; | ||
| 1949 | dma_addr_t daddr; | ||
| 1933 | 1950 | ||
| 1934 | for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { | 1951 | for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { |
| 1935 | if (vgpu->gtt.scratch_pt[i].page != NULL) { | 1952 | if (vgpu->gtt.scratch_pt[i].page != NULL) { |
| 1953 | daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn << | ||
| 1954 | GTT_PAGE_SHIFT); | ||
| 1955 | dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL); | ||
| 1936 | __free_page(vgpu->gtt.scratch_pt[i].page); | 1956 | __free_page(vgpu->gtt.scratch_pt[i].page); |
| 1937 | vgpu->gtt.scratch_pt[i].page = NULL; | 1957 | vgpu->gtt.scratch_pt[i].page = NULL; |
| 1938 | vgpu->gtt.scratch_pt[i].page_mfn = 0; | 1958 | vgpu->gtt.scratch_pt[i].page_mfn = 0; |
| @@ -2192,6 +2212,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) | |||
| 2192 | { | 2212 | { |
| 2193 | int ret; | 2213 | int ret; |
| 2194 | void *page; | 2214 | void *page; |
| 2215 | struct device *dev = &gvt->dev_priv->drm.pdev->dev; | ||
| 2216 | dma_addr_t daddr; | ||
| 2195 | 2217 | ||
| 2196 | gvt_dbg_core("init gtt\n"); | 2218 | gvt_dbg_core("init gtt\n"); |
| 2197 | 2219 | ||
| @@ -2209,14 +2231,16 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) | |||
| 2209 | gvt_err("fail to allocate scratch ggtt page\n"); | 2231 | gvt_err("fail to allocate scratch ggtt page\n"); |
| 2210 | return -ENOMEM; | 2232 | return -ENOMEM; |
| 2211 | } | 2233 | } |
| 2212 | gvt->gtt.scratch_ggtt_page = virt_to_page(page); | ||
| 2213 | 2234 | ||
| 2214 | gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page); | 2235 | daddr = dma_map_page(dev, virt_to_page(page), 0, |
| 2215 | if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) { | 2236 | 4096, PCI_DMA_BIDIRECTIONAL); |
| 2216 | gvt_err("fail to translate scratch ggtt page\n"); | 2237 | if (dma_mapping_error(dev, daddr)) { |
| 2217 | __free_page(gvt->gtt.scratch_ggtt_page); | 2238 | gvt_err("fail to dmamap scratch ggtt page\n"); |
| 2218 | return -EFAULT; | 2239 | __free_page(virt_to_page(page)); |
| 2240 | return -ENOMEM; | ||
| 2219 | } | 2241 | } |
| 2242 | gvt->gtt.scratch_ggtt_page = virt_to_page(page); | ||
| 2243 | gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> GTT_PAGE_SHIFT); | ||
| 2220 | 2244 | ||
| 2221 | if (enable_out_of_sync) { | 2245 | if (enable_out_of_sync) { |
| 2222 | ret = setup_spt_oos(gvt); | 2246 | ret = setup_spt_oos(gvt); |
| @@ -2239,6 +2263,12 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) | |||
| 2239 | */ | 2263 | */ |
| 2240 | void intel_gvt_clean_gtt(struct intel_gvt *gvt) | 2264 | void intel_gvt_clean_gtt(struct intel_gvt *gvt) |
| 2241 | { | 2265 | { |
| 2266 | struct device *dev = &gvt->dev_priv->drm.pdev->dev; | ||
| 2267 | dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_ggtt_mfn << | ||
| 2268 | GTT_PAGE_SHIFT); | ||
| 2269 | |||
| 2270 | dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL); | ||
| 2271 | |||
| 2242 | __free_page(gvt->gtt.scratch_ggtt_page); | 2272 | __free_page(gvt->gtt.scratch_ggtt_page); |
| 2243 | 2273 | ||
| 2244 | if (enable_out_of_sync) | 2274 | if (enable_out_of_sync) |
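A recurring pattern in the gtt.c hunks above is the swap from the hypervisor-specific virt-to-mfn translation to the generic DMA API: the stored page_mfn is now simply a DMA address shifted down by GTT_PAGE_SHIFT. A minimal sketch of the map/unmap pairing, assuming GTT_PAGE_SHIFT is 12 to match the hardcoded 4096-byte mappings:

    /* sketch: map a scratch page and record its page-frame number */
    daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
                         4096, PCI_DMA_BIDIRECTIONAL);
    if (dma_mapping_error(dev, daddr))
            return -ENOMEM;            /* nothing mapped, nothing to undo */
    page_mfn = (unsigned long)(daddr >> GTT_PAGE_SHIFT);

    /* teardown must reconstruct the same dma_addr_t before unmapping */
    daddr = (dma_addr_t)page_mfn << GTT_PAGE_SHIFT;
    dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);

One subtlety: the teardown hunks above shift the unsigned long mfn before widening it to dma_addr_t, which is only safe while the DMA address fits in an unsigned long; the sketch casts first for that reason.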
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index e6bf5c533fbe..3b9d59e457ba 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c | |||
| @@ -68,8 +68,6 @@ static const struct intel_gvt_ops intel_gvt_ops = { | |||
| 68 | */ | 68 | */ |
| 69 | int intel_gvt_init_host(void) | 69 | int intel_gvt_init_host(void) |
| 70 | { | 70 | { |
| 71 | int ret; | ||
| 72 | |||
| 73 | if (intel_gvt_host.initialized) | 71 | if (intel_gvt_host.initialized) |
| 74 | return 0; | 72 | return 0; |
| 75 | 73 | ||
| @@ -96,11 +94,6 @@ int intel_gvt_init_host(void) | |||
| 96 | if (!intel_gvt_host.mpt) | 94 | if (!intel_gvt_host.mpt) |
| 97 | return -EINVAL; | 95 | return -EINVAL; |
| 98 | 96 | ||
| 99 | /* Try to detect if we're running in host instead of VM. */ | ||
| 100 | ret = intel_gvt_hypervisor_detect_host(); | ||
| 101 | if (ret) | ||
| 102 | return -ENODEV; | ||
| 103 | |||
| 104 | gvt_dbg_core("Running with hypervisor %s in host mode\n", | 97 | gvt_dbg_core("Running with hypervisor %s in host mode\n", |
| 105 | supported_hypervisors[intel_gvt_host.hypervisor_type]); | 98 | supported_hypervisors[intel_gvt_host.hypervisor_type]); |
| 106 | 99 | ||
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 30e543f5a703..df7f33abd393 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h | |||
| @@ -38,7 +38,6 @@ | |||
| 38 | * both Xen and KVM by providing dedicated hypervisor-related MPT modules. | 38 | * both Xen and KVM by providing dedicated hypervisor-related MPT modules. |
| 39 | */ | 39 | */ |
| 40 | struct intel_gvt_mpt { | 40 | struct intel_gvt_mpt { |
| 41 | int (*detect_host)(void); | ||
| 42 | int (*host_init)(struct device *dev, void *gvt, const void *ops); | 41 | int (*host_init)(struct device *dev, void *gvt, const void *ops); |
| 43 | void (*host_exit)(struct device *dev, void *gvt); | 42 | void (*host_exit)(struct device *dev, void *gvt); |
| 44 | int (*attach_vgpu)(void *vgpu, unsigned long *handle); | 43 | int (*attach_vgpu)(void *vgpu, unsigned long *handle); |
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index f7be02ac4be1..92bb247e3478 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c | |||
| @@ -176,26 +176,15 @@ int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu, | |||
| 176 | { | 176 | { |
| 177 | struct intel_gvt *gvt = vgpu->gvt; | 177 | struct intel_gvt *gvt = vgpu->gvt; |
| 178 | struct intel_gvt_irq_ops *ops = gvt->irq.ops; | 178 | struct intel_gvt_irq_ops *ops = gvt->irq.ops; |
| 179 | u32 changed, masked, unmasked; | ||
| 180 | u32 imr = *(u32 *)p_data; | 179 | u32 imr = *(u32 *)p_data; |
| 181 | 180 | ||
| 182 | gvt_dbg_irq("write IMR %x with val %x\n", | 181 | gvt_dbg_irq("write IMR %x, new %08x, old %08x, changed %08x\n", |
| 183 | reg, imr); | 182 | reg, imr, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ imr); |
| 184 | |||
| 185 | gvt_dbg_irq("old vIMR %x\n", vgpu_vreg(vgpu, reg)); | ||
| 186 | |||
| 187 | /* figure out newly masked/unmasked bits */ | ||
| 188 | changed = vgpu_vreg(vgpu, reg) ^ imr; | ||
| 189 | masked = (vgpu_vreg(vgpu, reg) & changed) ^ changed; | ||
| 190 | unmasked = masked ^ changed; | ||
| 191 | |||
| 192 | gvt_dbg_irq("changed %x, masked %x, unmasked %x\n", | ||
| 193 | changed, masked, unmasked); | ||
| 194 | 183 | ||
| 195 | vgpu_vreg(vgpu, reg) = imr; | 184 | vgpu_vreg(vgpu, reg) = imr; |
| 196 | 185 | ||
| 197 | ops->check_pending_irq(vgpu); | 186 | ops->check_pending_irq(vgpu); |
| 198 | gvt_dbg_irq("IRQ: new vIMR %x\n", vgpu_vreg(vgpu, reg)); | 187 | |
| 199 | return 0; | 188 | return 0; |
| 200 | } | 189 | } |
| 201 | 190 | ||
| @@ -217,14 +206,11 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu, | |||
| 217 | { | 206 | { |
| 218 | struct intel_gvt *gvt = vgpu->gvt; | 207 | struct intel_gvt *gvt = vgpu->gvt; |
| 219 | struct intel_gvt_irq_ops *ops = gvt->irq.ops; | 208 | struct intel_gvt_irq_ops *ops = gvt->irq.ops; |
| 220 | u32 changed, enabled, disabled; | ||
| 221 | u32 ier = *(u32 *)p_data; | 209 | u32 ier = *(u32 *)p_data; |
| 222 | u32 virtual_ier = vgpu_vreg(vgpu, reg); | 210 | u32 virtual_ier = vgpu_vreg(vgpu, reg); |
| 223 | 211 | ||
| 224 | gvt_dbg_irq("write master irq reg %x with val %x\n", | 212 | gvt_dbg_irq("write MASTER_IRQ %x, new %08x, old %08x, changed %08x\n", |
| 225 | reg, ier); | 213 | reg, ier, virtual_ier, virtual_ier ^ ier); |
| 226 | |||
| 227 | gvt_dbg_irq("old vreg %x\n", vgpu_vreg(vgpu, reg)); | ||
| 228 | 214 | ||
| 229 | /* | 215 | /* |
| 230 | * GEN8_MASTER_IRQ is a special irq register, | 216 | * GEN8_MASTER_IRQ is a special irq register, |
| @@ -236,16 +222,8 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu, | |||
| 236 | vgpu_vreg(vgpu, reg) &= ~GEN8_MASTER_IRQ_CONTROL; | 222 | vgpu_vreg(vgpu, reg) &= ~GEN8_MASTER_IRQ_CONTROL; |
| 237 | vgpu_vreg(vgpu, reg) |= ier; | 223 | vgpu_vreg(vgpu, reg) |= ier; |
| 238 | 224 | ||
| 239 | /* figure out newly enabled/disable bits */ | ||
| 240 | changed = virtual_ier ^ ier; | ||
| 241 | enabled = (virtual_ier & changed) ^ changed; | ||
| 242 | disabled = enabled ^ changed; | ||
| 243 | |||
| 244 | gvt_dbg_irq("changed %x, enabled %x, disabled %x\n", | ||
| 245 | changed, enabled, disabled); | ||
| 246 | |||
| 247 | ops->check_pending_irq(vgpu); | 225 | ops->check_pending_irq(vgpu); |
| 248 | gvt_dbg_irq("new vreg %x\n", vgpu_vreg(vgpu, reg)); | 226 | |
| 249 | return 0; | 227 | return 0; |
| 250 | } | 228 | } |
| 251 | 229 | ||
| @@ -268,21 +246,11 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu, | |||
| 268 | struct intel_gvt *gvt = vgpu->gvt; | 246 | struct intel_gvt *gvt = vgpu->gvt; |
| 269 | struct intel_gvt_irq_ops *ops = gvt->irq.ops; | 247 | struct intel_gvt_irq_ops *ops = gvt->irq.ops; |
| 270 | struct intel_gvt_irq_info *info; | 248 | struct intel_gvt_irq_info *info; |
| 271 | u32 changed, enabled, disabled; | ||
| 272 | u32 ier = *(u32 *)p_data; | 249 | u32 ier = *(u32 *)p_data; |
| 273 | 250 | ||
| 274 | gvt_dbg_irq("write IER %x with val %x\n", | 251 | gvt_dbg_irq("write IER %x, new %08x, old %08x, changed %08x\n", |
| 275 | reg, ier); | 252 | reg, ier, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ ier); |
| 276 | |||
| 277 | gvt_dbg_irq("old vIER %x\n", vgpu_vreg(vgpu, reg)); | ||
| 278 | 253 | ||
| 279 | /* figure out newly enabled/disable bits */ | ||
| 280 | changed = vgpu_vreg(vgpu, reg) ^ ier; | ||
| 281 | enabled = (vgpu_vreg(vgpu, reg) & changed) ^ changed; | ||
| 282 | disabled = enabled ^ changed; | ||
| 283 | |||
| 284 | gvt_dbg_irq("changed %x, enabled %x, disabled %x\n", | ||
| 285 | changed, enabled, disabled); | ||
| 286 | vgpu_vreg(vgpu, reg) = ier; | 254 | vgpu_vreg(vgpu, reg) = ier; |
| 287 | 255 | ||
| 288 | info = regbase_to_irq_info(gvt, ier_to_regbase(reg)); | 256 | info = regbase_to_irq_info(gvt, ier_to_regbase(reg)); |
| @@ -293,7 +261,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu, | |||
| 293 | update_upstream_irq(vgpu, info); | 261 | update_upstream_irq(vgpu, info); |
| 294 | 262 | ||
| 295 | ops->check_pending_irq(vgpu); | 263 | ops->check_pending_irq(vgpu); |
| 296 | gvt_dbg_irq("new vIER %x\n", vgpu_vreg(vgpu, reg)); | 264 | |
| 297 | return 0; | 265 | return 0; |
| 298 | } | 266 | } |
| 299 | 267 | ||
| @@ -317,7 +285,8 @@ int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg, | |||
| 317 | iir_to_regbase(reg)); | 285 | iir_to_regbase(reg)); |
| 318 | u32 iir = *(u32 *)p_data; | 286 | u32 iir = *(u32 *)p_data; |
| 319 | 287 | ||
| 320 | gvt_dbg_irq("write IIR %x with val %x\n", reg, iir); | 288 | gvt_dbg_irq("write IIR %x, new %08x, old %08x, changed %08x\n", |
| 289 | reg, iir, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ iir); | ||
| 321 | 290 | ||
| 322 | if (WARN_ON(!info)) | 291 | if (WARN_ON(!info)) |
| 323 | return -EINVAL; | 292 | return -EINVAL; |
| @@ -619,6 +588,10 @@ static void gen8_init_irq( | |||
| 619 | SET_BIT_INFO(irq, 3, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A); | 588 | SET_BIT_INFO(irq, 3, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A); |
| 620 | SET_BIT_INFO(irq, 3, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B); | 589 | SET_BIT_INFO(irq, 3, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B); |
| 621 | SET_BIT_INFO(irq, 3, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); | 590 | SET_BIT_INFO(irq, 3, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); |
| 591 | |||
| 592 | SET_BIT_INFO(irq, 4, SPRITE_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A); | ||
| 593 | SET_BIT_INFO(irq, 4, SPRITE_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B); | ||
| 594 | SET_BIT_INFO(irq, 4, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); | ||
| 622 | } | 595 | } |
| 623 | 596 | ||
| 624 | /* GEN8 interrupt PCU events */ | 597 | /* GEN8 interrupt PCU events */ |
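The interrupt.c handlers above can drop the explicit changed/masked/unmasked bookkeeping because both derived values fall out of the single XOR that the new debug messages print. A self-contained sketch of the identity (names are illustrative, not from the driver):

    #include <stdint.h>

    static void imr_delta(uint32_t old_val, uint32_t new_val,
                          uint32_t *masked, uint32_t *unmasked)
    {
            uint32_t changed = old_val ^ new_val;  /* bits that toggled */

            *masked   = new_val & changed;  /* went 0 -> 1: newly masked   */
            *unmasked = old_val & changed;  /* went 1 -> 0: newly unmasked */
    }

This computes exactly what the deleted `(old & changed) ^ changed` dance did, so logging only the XOR loses nothing a reader cannot reconstruct.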
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 0c9234a87a20..0f7f5d97f582 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c | |||
| @@ -77,7 +77,7 @@ struct kvmgt_guest_info { | |||
| 77 | struct gvt_dma { | 77 | struct gvt_dma { |
| 78 | struct rb_node node; | 78 | struct rb_node node; |
| 79 | gfn_t gfn; | 79 | gfn_t gfn; |
| 80 | kvm_pfn_t pfn; | 80 | unsigned long iova; |
| 81 | }; | 81 | }; |
| 82 | 82 | ||
| 83 | static inline bool handle_valid(unsigned long handle) | 83 | static inline bool handle_valid(unsigned long handle) |
| @@ -89,6 +89,35 @@ static int kvmgt_guest_init(struct mdev_device *mdev); | |||
| 89 | static void intel_vgpu_release_work(struct work_struct *work); | 89 | static void intel_vgpu_release_work(struct work_struct *work); |
| 90 | static bool kvmgt_guest_exit(struct kvmgt_guest_info *info); | 90 | static bool kvmgt_guest_exit(struct kvmgt_guest_info *info); |
| 91 | 91 | ||
| 92 | static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn, | ||
| 93 | unsigned long *iova) | ||
| 94 | { | ||
| 95 | struct page *page; | ||
| 96 | struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; | ||
| 97 | dma_addr_t daddr; | ||
| 98 | |||
| 99 | page = pfn_to_page(pfn); | ||
| 100 | if (is_error_page(page)) | ||
| 101 | return -EFAULT; | ||
| 102 | |||
| 103 | daddr = dma_map_page(dev, page, 0, PAGE_SIZE, | ||
| 104 | PCI_DMA_BIDIRECTIONAL); | ||
| 105 | if (dma_mapping_error(dev, daddr)) | ||
| 106 | return -ENOMEM; | ||
| 107 | |||
| 108 | *iova = (unsigned long)(daddr >> PAGE_SHIFT); | ||
| 109 | return 0; | ||
| 110 | } | ||
| 111 | |||
| 112 | static void gvt_dma_unmap_iova(struct intel_vgpu *vgpu, unsigned long iova) | ||
| 113 | { | ||
| 114 | struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; | ||
| 115 | dma_addr_t daddr; | ||
| 116 | |||
| 117 | daddr = (dma_addr_t)(iova << PAGE_SHIFT); | ||
| 118 | dma_unmap_page(dev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
| 119 | } | ||
| 120 | |||
| 92 | static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn) | 121 | static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn) |
| 93 | { | 122 | { |
| 94 | struct rb_node *node = vgpu->vdev.cache.rb_node; | 123 | struct rb_node *node = vgpu->vdev.cache.rb_node; |
| @@ -111,21 +140,22 @@ out: | |||
| 111 | return ret; | 140 | return ret; |
| 112 | } | 141 | } |
| 113 | 142 | ||
| 114 | static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn) | 143 | static unsigned long gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn) |
| 115 | { | 144 | { |
| 116 | struct gvt_dma *entry; | 145 | struct gvt_dma *entry; |
| 117 | kvm_pfn_t pfn; | 146 | unsigned long iova; |
| 118 | 147 | ||
| 119 | mutex_lock(&vgpu->vdev.cache_lock); | 148 | mutex_lock(&vgpu->vdev.cache_lock); |
| 120 | 149 | ||
| 121 | entry = __gvt_cache_find(vgpu, gfn); | 150 | entry = __gvt_cache_find(vgpu, gfn); |
| 122 | pfn = (entry == NULL) ? 0 : entry->pfn; | 151 | iova = (entry == NULL) ? INTEL_GVT_INVALID_ADDR : entry->iova; |
| 123 | 152 | ||
| 124 | mutex_unlock(&vgpu->vdev.cache_lock); | 153 | mutex_unlock(&vgpu->vdev.cache_lock); |
| 125 | return pfn; | 154 | return iova; |
| 126 | } | 155 | } |
| 127 | 156 | ||
| 128 | static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn) | 157 | static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, |
| 158 | unsigned long iova) | ||
| 129 | { | 159 | { |
| 130 | struct gvt_dma *new, *itr; | 160 | struct gvt_dma *new, *itr; |
| 131 | struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL; | 161 | struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL; |
| @@ -135,7 +165,7 @@ static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn) | |||
| 135 | return; | 165 | return; |
| 136 | 166 | ||
| 137 | new->gfn = gfn; | 167 | new->gfn = gfn; |
| 138 | new->pfn = pfn; | 168 | new->iova = iova; |
| 139 | 169 | ||
| 140 | mutex_lock(&vgpu->vdev.cache_lock); | 170 | mutex_lock(&vgpu->vdev.cache_lock); |
| 141 | while (*link) { | 171 | while (*link) { |
| @@ -182,6 +212,7 @@ static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn) | |||
| 182 | } | 212 | } |
| 183 | 213 | ||
| 184 | g1 = gfn; | 214 | g1 = gfn; |
| 215 | gvt_dma_unmap_iova(vgpu, this->iova); | ||
| 185 | rc = vfio_unpin_pages(dev, &g1, 1); | 216 | rc = vfio_unpin_pages(dev, &g1, 1); |
| 186 | WARN_ON(rc != 1); | 217 | WARN_ON(rc != 1); |
| 187 | __gvt_cache_remove_entry(vgpu, this); | 218 | __gvt_cache_remove_entry(vgpu, this); |
| @@ -204,6 +235,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu) | |||
| 204 | mutex_lock(&vgpu->vdev.cache_lock); | 235 | mutex_lock(&vgpu->vdev.cache_lock); |
| 205 | while ((node = rb_first(&vgpu->vdev.cache))) { | 236 | while ((node = rb_first(&vgpu->vdev.cache))) { |
| 206 | dma = rb_entry(node, struct gvt_dma, node); | 237 | dma = rb_entry(node, struct gvt_dma, node); |
| 238 | gvt_dma_unmap_iova(vgpu, dma->iova); | ||
| 207 | gfn = dma->gfn; | 239 | gfn = dma->gfn; |
| 208 | 240 | ||
| 209 | vfio_unpin_pages(dev, &gfn, 1); | 241 | vfio_unpin_pages(dev, &gfn, 1); |
| @@ -230,8 +262,8 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt, | |||
| 230 | return NULL; | 262 | return NULL; |
| 231 | } | 263 | } |
| 232 | 264 | ||
| 233 | static ssize_t available_instance_show(struct kobject *kobj, struct device *dev, | 265 | static ssize_t available_instances_show(struct kobject *kobj, |
| 234 | char *buf) | 266 | struct device *dev, char *buf) |
| 235 | { | 267 | { |
| 236 | struct intel_vgpu_type *type; | 268 | struct intel_vgpu_type *type; |
| 237 | unsigned int num = 0; | 269 | unsigned int num = 0; |
| @@ -269,12 +301,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev, | |||
| 269 | type->fence); | 301 | type->fence); |
| 270 | } | 302 | } |
| 271 | 303 | ||
| 272 | static MDEV_TYPE_ATTR_RO(available_instance); | 304 | static MDEV_TYPE_ATTR_RO(available_instances); |
| 273 | static MDEV_TYPE_ATTR_RO(device_api); | 305 | static MDEV_TYPE_ATTR_RO(device_api); |
| 274 | static MDEV_TYPE_ATTR_RO(description); | 306 | static MDEV_TYPE_ATTR_RO(description); |
| 275 | 307 | ||
| 276 | static struct attribute *type_attrs[] = { | 308 | static struct attribute *type_attrs[] = { |
| 277 | &mdev_type_attr_available_instance.attr, | 309 | &mdev_type_attr_available_instances.attr, |
| 278 | &mdev_type_attr_device_api.attr, | 310 | &mdev_type_attr_device_api.attr, |
| 279 | &mdev_type_attr_description.attr, | 311 | &mdev_type_attr_description.attr, |
| 280 | NULL, | 312 | NULL, |
| @@ -965,11 +997,6 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, | |||
| 965 | sparse->areas[0].offset = | 997 | sparse->areas[0].offset = |
| 966 | PAGE_ALIGN(vgpu_aperture_offset(vgpu)); | 998 | PAGE_ALIGN(vgpu_aperture_offset(vgpu)); |
| 967 | sparse->areas[0].size = vgpu_aperture_sz(vgpu); | 999 | sparse->areas[0].size = vgpu_aperture_sz(vgpu); |
| 968 | if (!caps.buf) { | ||
| 969 | kfree(caps.buf); | ||
| 970 | caps.buf = NULL; | ||
| 971 | caps.size = 0; | ||
| 972 | } | ||
| 973 | break; | 1000 | break; |
| 974 | 1001 | ||
| 975 | case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX: | 1002 | case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX: |
| @@ -1248,43 +1275,6 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm, | |||
| 1248 | spin_unlock(&kvm->mmu_lock); | 1275 | spin_unlock(&kvm->mmu_lock); |
| 1249 | } | 1276 | } |
| 1250 | 1277 | ||
| 1251 | static bool kvmgt_check_guest(void) | ||
| 1252 | { | ||
| 1253 | unsigned int eax, ebx, ecx, edx; | ||
| 1254 | char s[12]; | ||
| 1255 | unsigned int *i; | ||
| 1256 | |||
| 1257 | eax = KVM_CPUID_SIGNATURE; | ||
| 1258 | ebx = ecx = edx = 0; | ||
| 1259 | |||
| 1260 | asm volatile ("cpuid" | ||
| 1261 | : "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx) | ||
| 1262 | : | ||
| 1263 | : "cc", "memory"); | ||
| 1264 | i = (unsigned int *)s; | ||
| 1265 | i[0] = ebx; | ||
| 1266 | i[1] = ecx; | ||
| 1267 | i[2] = edx; | ||
| 1268 | |||
| 1269 | return !strncmp(s, "KVMKVMKVM", strlen("KVMKVMKVM")); | ||
| 1270 | } | ||
| 1271 | |||
| 1272 | /** | ||
| 1273 | * NOTE: | ||
| 1274 | * It's actually impossible to check if we are running in a KVM host, | ||
| 1275 | * since the "KVM host" is simply native. So we only detect the guest here. | ||
| 1276 | */ | ||
| 1277 | static int kvmgt_detect_host(void) | ||
| 1278 | { | ||
| 1279 | #ifdef CONFIG_INTEL_IOMMU | ||
| 1280 | if (intel_iommu_gfx_mapped) { | ||
| 1281 | gvt_err("Hardware IOMMU compatibility not yet supported, try to boot with intel_iommu=igfx_off\n"); | ||
| 1282 | return -ENODEV; | ||
| 1283 | } | ||
| 1284 | #endif | ||
| 1285 | return kvmgt_check_guest() ? -ENODEV : 0; | ||
| 1286 | } | ||
| 1287 | |||
| 1288 | static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm) | 1278 | static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm) |
| 1289 | { | 1279 | { |
| 1290 | struct intel_vgpu *itr; | 1280 | struct intel_vgpu *itr; |
| @@ -1390,7 +1380,7 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) | |||
| 1390 | 1380 | ||
| 1391 | static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) | 1381 | static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) |
| 1392 | { | 1382 | { |
| 1393 | unsigned long pfn; | 1383 | unsigned long iova, pfn; |
| 1394 | struct kvmgt_guest_info *info; | 1384 | struct kvmgt_guest_info *info; |
| 1395 | struct device *dev; | 1385 | struct device *dev; |
| 1396 | int rc; | 1386 | int rc; |
| @@ -1399,9 +1389,9 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) | |||
| 1399 | return INTEL_GVT_INVALID_ADDR; | 1389 | return INTEL_GVT_INVALID_ADDR; |
| 1400 | 1390 | ||
| 1401 | info = (struct kvmgt_guest_info *)handle; | 1391 | info = (struct kvmgt_guest_info *)handle; |
| 1402 | pfn = gvt_cache_find(info->vgpu, gfn); | 1392 | iova = gvt_cache_find(info->vgpu, gfn); |
| 1403 | if (pfn != 0) | 1393 | if (iova != INTEL_GVT_INVALID_ADDR) |
| 1404 | return pfn; | 1394 | return iova; |
| 1405 | 1395 | ||
| 1406 | pfn = INTEL_GVT_INVALID_ADDR; | 1396 | pfn = INTEL_GVT_INVALID_ADDR; |
| 1407 | dev = mdev_dev(info->vgpu->vdev.mdev); | 1397 | dev = mdev_dev(info->vgpu->vdev.mdev); |
| @@ -1410,9 +1400,16 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) | |||
| 1410 | gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc); | 1400 | gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc); |
| 1411 | return INTEL_GVT_INVALID_ADDR; | 1401 | return INTEL_GVT_INVALID_ADDR; |
| 1412 | } | 1402 | } |
| 1403 | /* translate to a host iova so the GFX device can use it for DMA */ | ||
| 1404 | rc = gvt_dma_map_iova(info->vgpu, pfn, &iova); | ||
| 1405 | if (rc) { | ||
| 1406 | gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn); | ||
| 1407 | vfio_unpin_pages(dev, &gfn, 1); | ||
| 1408 | return INTEL_GVT_INVALID_ADDR; | ||
| 1409 | } | ||
| 1413 | 1410 | ||
| 1414 | gvt_cache_add(info->vgpu, gfn, pfn); | 1411 | gvt_cache_add(info->vgpu, gfn, iova); |
| 1415 | return pfn; | 1412 | return iova; |
| 1416 | } | 1413 | } |
| 1417 | 1414 | ||
| 1418 | static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, | 1415 | static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, |
| @@ -1459,7 +1456,6 @@ static unsigned long kvmgt_virt_to_pfn(void *addr) | |||
| 1459 | } | 1456 | } |
| 1460 | 1457 | ||
| 1461 | struct intel_gvt_mpt kvmgt_mpt = { | 1458 | struct intel_gvt_mpt kvmgt_mpt = { |
| 1462 | .detect_host = kvmgt_detect_host, | ||
| 1463 | .host_init = kvmgt_host_init, | 1459 | .host_init = kvmgt_host_init, |
| 1464 | .host_exit = kvmgt_host_exit, | 1460 | .host_exit = kvmgt_host_exit, |
| 1465 | .attach_vgpu = kvmgt_attach_vgpu, | 1461 | .attach_vgpu = kvmgt_attach_vgpu, |
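Taken together, the kvmgt.c hunks convert the per-vGPU rb-tree cache from gfn-to-pfn to gfn-to-iova, so callers always receive an address the device can actually DMA to. The resulting lookup path, sketched from the hunks above (locking elided; the vfio_pin_pages() flags reflect the vfio mdev API of this era and are an assumption here):

    /* sketch: resolve a guest frame number to a host iova */
    iova = gvt_cache_find(vgpu, gfn);       /* hit: already pinned+mapped */
    if (iova != INTEL_GVT_INVALID_ADDR)
            return iova;

    rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
    if (rc != 1)
            return INTEL_GVT_INVALID_ADDR;
    if (gvt_dma_map_iova(vgpu, pfn, &iova)) {  /* pfn -> host iova */
            vfio_unpin_pages(dev, &gfn, 1);    /* undo the pin on failure */
            return INTEL_GVT_INVALID_ADDR;
    }
    gvt_cache_add(vgpu, gfn, iova);            /* remember for next time */
    return iova;

The unwind lives in gvt_cache_remove() and gvt_cache_destroy(), which now dma-unmap before unpinning, the reverse of the setup order.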
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 1af5830c0a56..419353624c5a 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h | |||
| @@ -44,18 +44,6 @@ | |||
| 44 | */ | 44 | */ |
| 45 | 45 | ||
| 46 | /** | 46 | /** |
| 47 | * intel_gvt_hypervisor_detect_host - check if GVT-g is running within | ||
| 48 | * hypervisor host/privilged domain | ||
| 49 | * | ||
| 50 | * Returns: | ||
| 51 | * Zero on success, -ENODEV if current kernel is running inside a VM | ||
| 52 | */ | ||
| 53 | static inline int intel_gvt_hypervisor_detect_host(void) | ||
| 54 | { | ||
| 55 | return intel_gvt_host.mpt->detect_host(); | ||
| 56 | } | ||
| 57 | |||
| 58 | /** | ||
| 59 | * intel_gvt_hypervisor_host_init - init GVT-g host side | 47 | * intel_gvt_hypervisor_host_init - init GVT-g host side |
| 60 | * | 48 | * |
| 61 | * Returns: | 49 | * Returns: |
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c index 44136b1f3aab..2b3a642284b6 100644 --- a/drivers/gpu/drm/i915/gvt/render.c +++ b/drivers/gpu/drm/i915/gvt/render.c | |||
| @@ -236,12 +236,18 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id) | |||
| 236 | } | 236 | } |
| 237 | } | 237 | } |
| 238 | 238 | ||
| 239 | #define CTX_CONTEXT_CONTROL_VAL 0x03 | ||
| 240 | |||
| 239 | void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id) | 241 | void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id) |
| 240 | { | 242 | { |
| 241 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; | 243 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; |
| 242 | struct render_mmio *mmio; | 244 | struct render_mmio *mmio; |
| 243 | u32 v; | 245 | u32 v; |
| 244 | int i, array_size; | 246 | int i, array_size; |
| 247 | u32 *reg_state = vgpu->shadow_ctx->engine[ring_id].lrc_reg_state; | ||
| 248 | u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL]; | ||
| 249 | u32 inhibit_mask = | ||
| 250 | _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); | ||
| 245 | 251 | ||
| 246 | if (IS_SKYLAKE(vgpu->gvt->dev_priv)) { | 252 | if (IS_SKYLAKE(vgpu->gvt->dev_priv)) { |
| 247 | mmio = gen9_render_mmio_list; | 253 | mmio = gen9_render_mmio_list; |
| @@ -257,6 +263,17 @@ void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id) | |||
| 257 | continue; | 263 | continue; |
| 258 | 264 | ||
| 259 | mmio->value = I915_READ(mmio->reg); | 265 | mmio->value = I915_READ(mmio->reg); |
| 266 | |||
| 267 | /* | ||
| 268 | * If it is an inhibit context, load the in_context mmio | ||
| 269 | * into HW by an mmio write; if it is not, skip this mmio | ||
| 270 | * write. | ||
| 271 | */ | ||
| 272 | if (mmio->in_context && | ||
| 273 | ((ctx_ctrl & inhibit_mask) != inhibit_mask) && | ||
| 274 | i915.enable_execlists) | ||
| 275 | continue; | ||
| 276 | |||
| 260 | if (mmio->mask) | 277 | if (mmio->mask) |
| 261 | v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16); | 278 | v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16); |
| 262 | else | 279 | else |
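The inhibit-context test added above leans on the i915 masked-register convention, where the upper 16 bits of a write select which of the lower 16 bits take effect, so an "is this bit enabled" check must match both halves. A hedged sketch, assuming the usual shape of the masked-bit macro:

    #include <stdint.h>

    #define MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))   /* assumed form */

    static int is_inhibit_context(uint32_t ctx_ctrl, uint32_t bit)
    {
            uint32_t inhibit_mask = MASKED_BIT_ENABLE(bit);

            /* only a context that wrote both the mask half and the
             * value half really enabled CTX_RESTORE_INHIBIT */
            return (ctx_ctrl & inhibit_mask) == inhibit_mask;
    }

For non-inhibit contexts the loop above skips in_context registers, the idea being that such a context restores them from its context image anyway.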
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 678b0be85376..06c9584ac5f0 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c | |||
| @@ -125,7 +125,6 @@ static void tbs_sched_func(struct work_struct *work) | |||
| 125 | vgpu_data = scheduler->current_vgpu->sched_data; | 125 | vgpu_data = scheduler->current_vgpu->sched_data; |
| 126 | head = &vgpu_data->list; | 126 | head = &vgpu_data->list; |
| 127 | } else { | 127 | } else { |
| 128 | gvt_dbg_sched("no current vgpu search from q head\n"); | ||
| 129 | head = &sched_data->runq_head; | 128 | head = &sched_data->runq_head; |
| 130 | } | 129 | } |
| 131 | 130 | ||
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 7ea68a75dc46..d6b6d0efdd1a 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c | |||
| @@ -169,7 +169,8 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) | |||
| 169 | gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n", | 169 | gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n", |
| 170 | ring_id, workload); | 170 | ring_id, workload); |
| 171 | 171 | ||
| 172 | shadow_ctx->desc_template = workload->ctx_desc.addressing_mode << | 172 | shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT); |
| 173 | shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode << | ||
| 173 | GEN8_CTX_ADDRESSING_MODE_SHIFT; | 174 | GEN8_CTX_ADDRESSING_MODE_SHIFT; |
| 174 | 175 | ||
| 175 | mutex_lock(&dev_priv->drm.struct_mutex); | 176 | mutex_lock(&dev_priv->drm.struct_mutex); |
| @@ -456,7 +457,7 @@ static int workload_thread(void *priv) | |||
| 456 | } | 457 | } |
| 457 | 458 | ||
| 458 | complete: | 459 | complete: |
| 459 | gvt_dbg_sched("will complete workload %p\n, status: %d\n", | 460 | gvt_dbg_sched("will complete workload %p, status: %d\n", |
| 460 | workload, workload->status); | 461 | workload, workload->status); |
| 461 | 462 | ||
| 462 | if (workload->req) | 463 | if (workload->req) |
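The dispatch_workload() change above is a textbook read-modify-write repair: the old plain assignment of the shifted field wiped every other bit of desc_template. The pattern in isolation, with an illustrative shift value standing in for GEN8_CTX_ADDRESSING_MODE_SHIFT:

    #include <stdint.h>

    #define ADDR_MODE_SHIFT 3   /* illustrative value */

    static uint32_t set_addressing_mode(uint32_t desc, uint32_t mode)
    {
            desc &= ~(0x3u << ADDR_MODE_SHIFT);        /* clear the field */
            desc |= (mode & 0x3u) << ADDR_MODE_SHIFT;  /* then set it     */
            return desc;
    }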
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index 3b30c28bff51..2833dfa8c9ae 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h | |||
| @@ -113,7 +113,7 @@ struct intel_shadow_bb_entry { | |||
| 113 | struct drm_i915_gem_object *obj; | 113 | struct drm_i915_gem_object *obj; |
| 114 | void *va; | 114 | void *va; |
| 115 | unsigned long len; | 115 | unsigned long len; |
| 116 | void *bb_start_cmd_va; | 116 | u32 *bb_start_cmd_va; |
| 117 | }; | 117 | }; |
| 118 | 118 | ||
| 119 | #define workload_q_head(vgpu, ring_id) \ | 119 | #define workload_q_head(vgpu, ring_id) \ |
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 7295bc8e12fb..95a97aa0051e 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c | |||
| @@ -74,7 +74,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) | |||
| 74 | int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) | 74 | int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) |
| 75 | { | 75 | { |
| 76 | unsigned int num_types; | 76 | unsigned int num_types; |
| 77 | unsigned int i, low_avail; | 77 | unsigned int i, low_avail, high_avail; |
| 78 | unsigned int min_low; | 78 | unsigned int min_low; |
| 79 | 79 | ||
| 80 | /* vGPU type name is defined as GVTg_Vx_y which contains | 80 | /* vGPU type name is defined as GVTg_Vx_y which contains |
| @@ -89,9 +89,9 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) | |||
| 89 | * to indicate how many vGPU instances can be created for this | 89 | * to indicate how many vGPU instances can be created for this |
| 90 | * type. | 90 | * type. |
| 91 | * | 91 | * |
| 92 | * Currently use static size here as we init type earlier.. | ||
| 93 | */ | 92 | */ |
| 94 | low_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE; | 93 | low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE; |
| 94 | high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE; | ||
| 95 | num_types = 4; | 95 | num_types = 4; |
| 96 | 96 | ||
| 97 | gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type), | 97 | gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type), |
| @@ -106,7 +106,8 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) | |||
| 106 | gvt->types[i].low_gm_size = min_low; | 106 | gvt->types[i].low_gm_size = min_low; |
| 107 | gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U)); | 107 | gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U)); |
| 108 | gvt->types[i].fence = 4; | 108 | gvt->types[i].fence = 4; |
| 109 | gvt->types[i].max_instance = low_avail / min_low; | 109 | gvt->types[i].max_instance = min(low_avail / min_low, |
| 110 | high_avail / gvt->types[i].high_gm_size); | ||
| 110 | gvt->types[i].avail_instance = gvt->types[i].max_instance; | 111 | gvt->types[i].avail_instance = gvt->types[i].max_instance; |
| 111 | 112 | ||
| 112 | if (IS_GEN8(gvt->dev_priv)) | 113 | if (IS_GEN8(gvt->dev_priv)) |
| @@ -142,9 +143,9 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt) | |||
| 142 | /* Need to depend on maximum hw resource size but keep on | 143 | /* Need to depend on maximum hw resource size but keep on |
| 143 | * static config for now. | 144 | * static config for now. |
| 144 | */ | 145 | */ |
| 145 | low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE - | 146 | low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE - |
| 146 | gvt->gm.vgpu_allocated_low_gm_size; | 147 | gvt->gm.vgpu_allocated_low_gm_size; |
| 147 | high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE - | 148 | high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE - |
| 148 | gvt->gm.vgpu_allocated_high_gm_size; | 149 | gvt->gm.vgpu_allocated_high_gm_size; |
| 149 | fence_avail = gvt_fence_sz(gvt) - HOST_FENCE - | 150 | fence_avail = gvt_fence_sz(gvt) - HOST_FENCE - |
| 150 | gvt->fence.vgpu_allocated_fence_num; | 151 | gvt->fence.vgpu_allocated_fence_num; |
| @@ -384,6 +385,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, | |||
| 384 | intel_vgpu_reset_resource(vgpu); | 385 | intel_vgpu_reset_resource(vgpu); |
| 385 | intel_vgpu_reset_mmio(vgpu); | 386 | intel_vgpu_reset_mmio(vgpu); |
| 386 | populate_pvinfo_page(vgpu); | 387 | populate_pvinfo_page(vgpu); |
| 388 | intel_vgpu_reset_display(vgpu); | ||
| 387 | 389 | ||
| 388 | if (dmlr) | 390 | if (dmlr) |
| 389 | intel_vgpu_reset_cfg_space(vgpu); | 391 | intel_vgpu_reset_cfg_space(vgpu); |
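With the vgpu.c change the instance budget tracks the real aperture and hidden GM sizes instead of a hard-coded 256MB, and each type is capped by whichever of low or high graphics memory runs out first. A worked example under assumed sizes (none of these numbers come from a particular SKU):

    #include <stdint.h>
    #include <stdio.h>

    #define MB(x) ((uint64_t)(x) << 20)

    int main(void)
    {
            uint64_t low_avail  = MB(512)  - MB(128); /* aperture - host low GM */
            uint64_t high_avail = MB(2048) - MB(384); /* hidden - host high GM  */
            uint64_t min_low    = MB(64);             /* this type's low GM     */
            uint64_t high_gm    = MB(512);            /* this type's high GM    */

            uint64_t by_low  = low_avail / min_low;   /* 384MB / 64MB   = 6 */
            uint64_t by_high = high_avail / high_gm;  /* 1664MB / 512MB = 3 */

            printf("max_instance = %llu\n", (unsigned long long)
                   (by_low < by_high ? by_low : by_high));
            return 0;
    }

Here the high GM is the scarcer resource, so max_instance is 3 even though low GM alone would admit 6 instances; that is exactly the min() introduced above.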
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 4ae69ebe166e..f6017f2cfb86 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
| @@ -213,7 +213,8 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv) | |||
| 213 | } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) { | 213 | } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) { |
| 214 | dev_priv->pch_type = PCH_KBP; | 214 | dev_priv->pch_type = PCH_KBP; |
| 215 | DRM_DEBUG_KMS("Found KabyPoint PCH\n"); | 215 | DRM_DEBUG_KMS("Found KabyPoint PCH\n"); |
| 216 | WARN_ON(!IS_KABYLAKE(dev_priv)); | 216 | WARN_ON(!IS_SKYLAKE(dev_priv) && |
| 217 | !IS_KABYLAKE(dev_priv)); | ||
| 217 | } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || | 218 | } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || |
| 218 | (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) || | 219 | (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) || |
| 219 | ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && | 220 | ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && |
| @@ -824,10 +825,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, | |||
| 824 | if (ret < 0) | 825 | if (ret < 0) |
| 825 | return ret; | 826 | return ret; |
| 826 | 827 | ||
| 827 | ret = intel_gvt_init(dev_priv); | ||
| 828 | if (ret < 0) | ||
| 829 | goto err_workqueues; | ||
| 830 | |||
| 831 | /* This must be called before any calls to HAS_PCH_* */ | 828 | /* This must be called before any calls to HAS_PCH_* */ |
| 832 | intel_detect_pch(dev_priv); | 829 | intel_detect_pch(dev_priv); |
| 833 | 830 | ||
| @@ -841,7 +838,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, | |||
| 841 | intel_init_audio_hooks(dev_priv); | 838 | intel_init_audio_hooks(dev_priv); |
| 842 | ret = i915_gem_load_init(dev_priv); | 839 | ret = i915_gem_load_init(dev_priv); |
| 843 | if (ret < 0) | 840 | if (ret < 0) |
| 844 | goto err_gvt; | 841 | goto err_workqueues; |
| 845 | 842 | ||
| 846 | intel_display_crc_init(dev_priv); | 843 | intel_display_crc_init(dev_priv); |
| 847 | 844 | ||
| @@ -853,8 +850,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, | |||
| 853 | 850 | ||
| 854 | return 0; | 851 | return 0; |
| 855 | 852 | ||
| 856 | err_gvt: | ||
| 857 | intel_gvt_cleanup(dev_priv); | ||
| 858 | err_workqueues: | 853 | err_workqueues: |
| 859 | i915_workqueues_cleanup(dev_priv); | 854 | i915_workqueues_cleanup(dev_priv); |
| 860 | return ret; | 855 | return ret; |
| @@ -1077,6 +1072,10 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) | |||
| 1077 | DRM_DEBUG_DRIVER("can't enable MSI"); | 1072 | DRM_DEBUG_DRIVER("can't enable MSI"); |
| 1078 | } | 1073 | } |
| 1079 | 1074 | ||
| 1075 | ret = intel_gvt_init(dev_priv); | ||
| 1076 | if (ret) | ||
| 1077 | goto out_ggtt; | ||
| 1078 | |||
| 1080 | return 0; | 1079 | return 0; |
| 1081 | 1080 | ||
| 1082 | out_ggtt: | 1081 | out_ggtt: |
| @@ -1290,6 +1289,8 @@ void i915_driver_unload(struct drm_device *dev) | |||
| 1290 | 1289 | ||
| 1291 | intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); | 1290 | intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); |
| 1292 | 1291 | ||
| 1292 | intel_gvt_cleanup(dev_priv); | ||
| 1293 | |||
| 1293 | i915_driver_unregister(dev_priv); | 1294 | i915_driver_unregister(dev_priv); |
| 1294 | 1295 | ||
| 1295 | drm_vblank_cleanup(dev); | 1296 | drm_vblank_cleanup(dev); |
| @@ -2377,7 +2378,7 @@ static int intel_runtime_suspend(struct device *kdev) | |||
| 2377 | 2378 | ||
| 2378 | assert_forcewakes_inactive(dev_priv); | 2379 | assert_forcewakes_inactive(dev_priv); |
| 2379 | 2380 | ||
| 2380 | if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv)) | 2381 | if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) |
| 2381 | intel_hpd_poll_init(dev_priv); | 2382 | intel_hpd_poll_init(dev_priv); |
| 2382 | 2383 | ||
| 2383 | DRM_DEBUG_KMS("Device suspended\n"); | 2384 | DRM_DEBUG_KMS("Device suspended\n"); |
| @@ -2426,6 +2427,7 @@ static int intel_runtime_resume(struct device *kdev) | |||
| 2426 | * we can do is to hope that things will still work (and disable RPM). | 2427 | * we can do is to hope that things will still work (and disable RPM). |
| 2427 | */ | 2428 | */ |
| 2428 | i915_gem_init_swizzling(dev_priv); | 2429 | i915_gem_init_swizzling(dev_priv); |
| 2430 | i915_gem_restore_fences(dev_priv); | ||
| 2429 | 2431 | ||
| 2430 | intel_runtime_pm_enable_interrupts(dev_priv); | 2432 | intel_runtime_pm_enable_interrupts(dev_priv); |
| 2431 | 2433 | ||
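The i915_drv.c hunks move GVT bring-up from i915_driver_init_early() to i915_driver_init_hw(), i.e. to after the GGTT and MSI are ready, and move teardown from the early-init error path to driver unload. The reshuffled ordering, reduced to a comment sketch with unrelated steps elided:

    /*
     * i915_driver_init_hw():                 (sketch)
     *     ... GGTT init, MSI setup ...
     *     intel_gvt_init(dev_priv);          on failure: goto out_ggtt,
     *                                        unwinding everything above
     *
     * i915_driver_unload():
     *     intel_gvt_cleanup(dev_priv);       before i915_driver_unregister()
     */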
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 244628065f94..e44c598ecb82 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -2242,6 +2242,11 @@ struct drm_i915_private { | |||
| 2242 | 2242 | ||
| 2243 | struct i915_frontbuffer_tracking fb_tracking; | 2243 | struct i915_frontbuffer_tracking fb_tracking; |
| 2244 | 2244 | ||
| 2245 | struct intel_atomic_helper { | ||
| 2246 | struct llist_head free_list; | ||
| 2247 | struct work_struct free_work; | ||
| 2248 | } atomic_helper; | ||
| 2249 | |||
| 2245 | u16 orig_clock; | 2250 | u16 orig_clock; |
| 2246 | 2251 | ||
| 2247 | bool mchbar_need_disable; | 2252 | bool mchbar_need_disable; |
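The new atomic_helper member pairs a lock-free llist with a work item, the usual kernel recipe for deferring frees out of atomic context. A sketch of how such a pair is typically driven (the state type and work function names are illustrative, not taken from this diff):

    /* producer side, callable from atomic context */
    if (llist_add(&state->freed, &dev_priv->atomic_helper.free_list))
            schedule_work(&dev_priv->atomic_helper.free_work);
    /* llist_add() returns true only when the list was empty,
     * so the work gets scheduled exactly once per batch */

    /* consumer side, running in process context */
    static void free_states(struct work_struct *work)
    {
            struct llist_node *freed;

            freed = llist_del_all(&helper->free_list);  /* take the batch */
            /* walk 'freed' and release each queued state */
    }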
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c8689892a89f..88f3628b4e29 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -440,7 +440,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, | |||
| 440 | timeout = i915_gem_object_wait_fence(shared[i], | 440 | timeout = i915_gem_object_wait_fence(shared[i], |
| 441 | flags, timeout, | 441 | flags, timeout, |
| 442 | rps); | 442 | rps); |
| 443 | if (timeout <= 0) | 443 | if (timeout < 0) |
| 444 | break; | 444 | break; |
| 445 | 445 | ||
| 446 | dma_fence_put(shared[i]); | 446 | dma_fence_put(shared[i]); |
| @@ -453,7 +453,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, | |||
| 453 | excl = reservation_object_get_excl_rcu(resv); | 453 | excl = reservation_object_get_excl_rcu(resv); |
| 454 | } | 454 | } |
| 455 | 455 | ||
| 456 | if (excl && timeout > 0) | 456 | if (excl && timeout >= 0) |
| 457 | timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps); | 457 | timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps); |
| 458 | 458 | ||
| 459 | dma_fence_put(excl); | 459 | dma_fence_put(excl); |
| @@ -2009,8 +2009,16 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv) | |||
| 2009 | for (i = 0; i < dev_priv->num_fence_regs; i++) { | 2009 | for (i = 0; i < dev_priv->num_fence_regs; i++) { |
| 2010 | struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; | 2010 | struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; |
| 2011 | 2011 | ||
| 2012 | if (WARN_ON(reg->pin_count)) | 2012 | /* Ideally we want to assert that the fence register is not |
| 2013 | continue; | 2013 | * live at this point (i.e. that no piece of code will be |
| 2014 | * trying to write through fence + GTT, as that not only violates | ||
| 2015 | * our tracking of activity and associated locking/barriers, | ||
| 2016 | * but is also illegal given that the hw is powered down). | ||
| 2017 | * | ||
| 2018 | * Previously we used reg->pin_count as a "liveness" indicator. | ||
| 2019 | * That is not sufficient, and we need a more fine-grained | ||
| 2020 | * tool if we want to have a sanity check here. | ||
| 2021 | */ | ||
| 2014 | 2022 | ||
| 2015 | if (!reg->vma) | 2023 | if (!reg->vma) |
| 2016 | continue; | 2024 | continue; |
| @@ -2735,21 +2743,17 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine) | |||
| 2735 | engine->irq_seqno_barrier(engine); | 2743 | engine->irq_seqno_barrier(engine); |
| 2736 | 2744 | ||
| 2737 | request = i915_gem_find_active_request(engine); | 2745 | request = i915_gem_find_active_request(engine); |
| 2738 | if (!request) | 2746 | if (request && i915_gem_reset_request(request)) { |
| 2739 | return; | 2747 | DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n", |
| 2748 | engine->name, request->global_seqno); | ||
| 2740 | 2749 | ||
| 2741 | if (!i915_gem_reset_request(request)) | 2750 | /* If this context is now banned, skip all pending requests. */ |
| 2742 | return; | 2751 | if (i915_gem_context_is_banned(request->ctx)) |
| 2743 | 2752 | engine_skip_context(request); | |
| 2744 | DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n", | 2753 | } |
| 2745 | engine->name, request->global_seqno); | ||
| 2746 | 2754 | ||
| 2747 | /* Setup the CS to resume from the breadcrumb of the hung request */ | 2755 | /* Setup the CS to resume from the breadcrumb of the hung request */ |
| 2748 | engine->reset_hw(engine, request); | 2756 | engine->reset_hw(engine, request); |
| 2749 | |||
| 2750 | /* If this context is now banned, skip all of its pending requests. */ | ||
| 2751 | if (i915_gem_context_is_banned(request->ctx)) | ||
| 2752 | engine_skip_context(request); | ||
| 2753 | } | 2757 | } |
| 2754 | 2758 | ||
| 2755 | void i915_gem_reset_finish(struct drm_i915_private *dev_priv) | 2759 | void i915_gem_reset_finish(struct drm_i915_private *dev_priv) |
| @@ -3517,7 +3521,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, | |||
| 3517 | vma->display_alignment = max_t(u64, vma->display_alignment, alignment); | 3521 | vma->display_alignment = max_t(u64, vma->display_alignment, alignment); |
| 3518 | 3522 | ||
| 3519 | /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */ | 3523 | /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */ |
| 3520 | if (obj->cache_dirty) { | 3524 | if (obj->cache_dirty || obj->base.write_domain == I915_GEM_DOMAIN_CPU) { |
| 3521 | i915_gem_clflush_object(obj, true); | 3525 | i915_gem_clflush_object(obj, true); |
| 3522 | intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB); | 3526 | intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB); |
| 3523 | } | 3527 | } |
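The sign flips in the i915_gem_object_wait_reservation() hunks encode a three-way timeout convention that the old `<= 0` / `> 0` tests conflated. Spelled out as a self-contained sketch (the callback stands in for i915_gem_object_wait_fence()):

    /* convention assumed from the hunks above:
     *   > 0   jiffies remaining: keep waiting on the next fence
     *   == 0  caller polled (no wait): still visit every fence
     *   < 0   hard error such as -ETIME: stop immediately       */
    static long wait_all(long (*wait_one)(int idx, long timeout),
                         int count, long timeout)
    {
            for (int i = 0; i < count; i++) {
                    timeout = wait_one(i, timeout);
                    if (timeout < 0)
                            break;
            }
            return timeout;
    }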
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 57bec08e80c5..d02cfaefe1c8 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
| @@ -1180,14 +1180,14 @@ validate_exec_list(struct drm_device *dev, | |||
| 1180 | if (exec[i].offset != | 1180 | if (exec[i].offset != |
| 1181 | gen8_canonical_addr(exec[i].offset & PAGE_MASK)) | 1181 | gen8_canonical_addr(exec[i].offset & PAGE_MASK)) |
| 1182 | return -EINVAL; | 1182 | return -EINVAL; |
| 1183 | |||
| 1184 | /* From drm_mm perspective address space is continuous, | ||
| 1185 | * so from this point we're always using non-canonical | ||
| 1186 | * form internally. | ||
| 1187 | */ | ||
| 1188 | exec[i].offset = gen8_noncanonical_addr(exec[i].offset); | ||
| 1189 | } | 1183 | } |
| 1190 | 1184 | ||
| 1185 | /* From drm_mm perspective address space is continuous, | ||
| 1186 | * so from this point we're always using non-canonical | ||
| 1187 | * form internally. | ||
| 1188 | */ | ||
| 1189 | exec[i].offset = gen8_noncanonical_addr(exec[i].offset); | ||
| 1190 | |||
| 1191 | if (exec[i].alignment && !is_power_of_2(exec[i].alignment)) | 1191 | if (exec[i].alignment && !is_power_of_2(exec[i].alignment)) |
| 1192 | return -EINVAL; | 1192 | return -EINVAL; |
| 1193 | 1193 | ||
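The execbuffer hunk only moves the noncanonical conversion out of the validation branch so it applies to every entry, but the canonical/noncanonical pair it relies on is worth spelling out. A sketch under the usual gen8 assumption of a 48-bit GPU address space (the right shift relies on arithmetic shift of signed values, as the kernel's sign_extend64() does):

    #include <stdint.h>

    /* canonical form sign-extends bit 47 into bits 63:48 */
    static uint64_t to_canonical(uint64_t addr)
    {
            return (uint64_t)((int64_t)(addr << 16) >> 16);
    }

    /* noncanonical form drops the sign extension again, which is
     * what drm_mm expects for its continuous address space */
    static uint64_t to_noncanonical(uint64_t addr)
    {
            return addr & ((1ull << 48) - 1);
    }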
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 30d8dbd04f0b..2801a4d56324 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
| @@ -755,9 +755,10 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm, | |||
| 755 | GEM_BUG_ON(pte_end > GEN8_PTES); | 755 | GEM_BUG_ON(pte_end > GEN8_PTES); |
| 756 | 756 | ||
| 757 | bitmap_clear(pt->used_ptes, pte, num_entries); | 757 | bitmap_clear(pt->used_ptes, pte, num_entries); |
| 758 | 758 | if (USES_FULL_PPGTT(vm->i915)) { | |
| 759 | if (bitmap_empty(pt->used_ptes, GEN8_PTES)) | 759 | if (bitmap_empty(pt->used_ptes, GEN8_PTES)) |
| 760 | return true; | 760 | return true; |
| 761 | } | ||
| 761 | 762 | ||
| 762 | pt_vaddr = kmap_px(pt); | 763 | pt_vaddr = kmap_px(pt); |
| 763 | 764 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c index 17ce53d0d092..933019e1b206 100644 --- a/drivers/gpu/drm/i915/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/i915_gem_internal.c | |||
| @@ -46,16 +46,39 @@ static struct sg_table * | |||
| 46 | i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) | 46 | i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) |
| 47 | { | 47 | { |
| 48 | struct drm_i915_private *i915 = to_i915(obj->base.dev); | 48 | struct drm_i915_private *i915 = to_i915(obj->base.dev); |
| 49 | unsigned int npages = obj->base.size / PAGE_SIZE; | ||
| 50 | struct sg_table *st; | 49 | struct sg_table *st; |
| 51 | struct scatterlist *sg; | 50 | struct scatterlist *sg; |
| 51 | unsigned int npages; | ||
| 52 | int max_order; | 52 | int max_order; |
| 53 | gfp_t gfp; | 53 | gfp_t gfp; |
| 54 | 54 | ||
| 55 | max_order = MAX_ORDER; | ||
| 56 | #ifdef CONFIG_SWIOTLB | ||
| 57 | if (swiotlb_nr_tbl()) { | ||
| 58 | unsigned int max_segment; | ||
| 59 | |||
| 60 | max_segment = swiotlb_max_segment(); | ||
| 61 | if (max_segment) { | ||
| 62 | max_segment = max_t(unsigned int, max_segment, | ||
| 63 | PAGE_SIZE) >> PAGE_SHIFT; | ||
| 64 | max_order = min(max_order, ilog2(max_segment)); | ||
| 65 | } | ||
| 66 | } | ||
| 67 | #endif | ||
| 68 | |||
| 69 | gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE; | ||
| 70 | if (IS_I965GM(i915) || IS_I965G(i915)) { | ||
| 71 | /* 965gm cannot relocate objects above 4GiB. */ | ||
| 72 | gfp &= ~__GFP_HIGHMEM; | ||
| 73 | gfp |= __GFP_DMA32; | ||
| 74 | } | ||
| 75 | |||
| 76 | create_st: | ||
| 55 | st = kmalloc(sizeof(*st), GFP_KERNEL); | 77 | st = kmalloc(sizeof(*st), GFP_KERNEL); |
| 56 | if (!st) | 78 | if (!st) |
| 57 | return ERR_PTR(-ENOMEM); | 79 | return ERR_PTR(-ENOMEM); |
| 58 | 80 | ||
| 81 | npages = obj->base.size / PAGE_SIZE; | ||
| 59 | if (sg_alloc_table(st, npages, GFP_KERNEL)) { | 82 | if (sg_alloc_table(st, npages, GFP_KERNEL)) { |
| 60 | kfree(st); | 83 | kfree(st); |
| 61 | return ERR_PTR(-ENOMEM); | 84 | return ERR_PTR(-ENOMEM); |
| @@ -64,19 +87,6 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) | |||
| 64 | sg = st->sgl; | 87 | sg = st->sgl; |
| 65 | st->nents = 0; | 88 | st->nents = 0; |
| 66 | 89 | ||
| 67 | max_order = MAX_ORDER; | ||
| 68 | #ifdef CONFIG_SWIOTLB | ||
| 69 | if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */ | ||
| 70 | max_order = min(max_order, ilog2(IO_TLB_SEGPAGES)); | ||
| 71 | #endif | ||
| 72 | |||
| 73 | gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE; | ||
| 74 | if (IS_I965GM(i915) || IS_I965G(i915)) { | ||
| 75 | /* 965gm cannot relocate objects above 4GiB. */ | ||
| 76 | gfp &= ~__GFP_HIGHMEM; | ||
| 77 | gfp |= __GFP_DMA32; | ||
| 78 | } | ||
| 79 | |||
| 80 | do { | 90 | do { |
| 81 | int order = min(fls(npages) - 1, max_order); | 91 | int order = min(fls(npages) - 1, max_order); |
| 82 | struct page *page; | 92 | struct page *page; |
| @@ -104,8 +114,15 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) | |||
| 104 | sg = __sg_next(sg); | 114 | sg = __sg_next(sg); |
| 105 | } while (1); | 115 | } while (1); |
| 106 | 116 | ||
| 107 | if (i915_gem_gtt_prepare_pages(obj, st)) | 117 | if (i915_gem_gtt_prepare_pages(obj, st)) { |
| 118 | /* Failed to dma-map try again with single page sg segments */ | ||
| 119 | if (get_order(st->sgl->length)) { | ||
| 120 | internal_free_pages(st); | ||
| 121 | max_order = 0; | ||
| 122 | goto create_st; | ||
| 123 | } | ||
| 108 | goto err; | 124 | goto err; |
| 125 | } | ||
| 109 | 126 | ||
| 110 | /* Mark the pages as dontneed whilst they are still pinned. As soon | 127 | /* Mark the pages as dontneed whilst they are still pinned. As soon |
| 111 | * as they are unpinned they are allowed to be reaped by the shrinker, | 128 | * as they are unpinned they are allowed to be reaped by the shrinker, |
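The restructured allocator above hoists the order-cap computation to the top, now honoring swiotlb_max_segment(), and gains a retry path: if the high-order sg table cannot be DMA-mapped, it is torn down and rebuilt with max_order = 0, i.e. single 4KiB pages. The control flow in miniature (compute_max_order() and build_sg_table() are hypothetical stand-ins for the inline logic above):

    /* sketch of the fallback; allocation details elided */
    max_order = compute_max_order();    /* capped by swiotlb if active */
    create_st:
            st = build_sg_table(obj, max_order);
            if (i915_gem_gtt_prepare_pages(obj, st)) {
                    if (get_order(st->sgl->length)) {  /* multi-page segs? */
                            internal_free_pages(st);
                            max_order = 0;             /* retry at order 0 */
                            goto create_st;
                    }
                    goto err;    /* already minimal, give up */
            }

get_order() is 0 for a single-page segment, so the retry can happen at most once.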
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 72b7f7d9461d..f31deeb72703 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c | |||
| @@ -1025,8 +1025,13 @@ __i915_request_wait_for_execute(struct drm_i915_gem_request *request, | |||
| 1025 | break; | 1025 | break; |
| 1026 | } | 1026 | } |
| 1027 | 1027 | ||
| 1028 | if (!timeout) { | ||
| 1029 | timeout = -ETIME; | ||
| 1030 | break; | ||
| 1031 | } | ||
| 1032 | |||
| 1028 | timeout = io_schedule_timeout(timeout); | 1033 | timeout = io_schedule_timeout(timeout); |
| 1029 | } while (timeout); | 1034 | } while (1); |
| 1030 | finish_wait(&request->execute.wait, &wait); | 1035 | finish_wait(&request->execute.wait, &wait); |
| 1031 | 1036 | ||
| 1032 | if (flags & I915_WAIT_LOCKED) | 1037 | if (flags & I915_WAIT_LOCKED) |
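The wait-loop fix above tests the remaining budget before calling io_schedule_timeout(), so exhaustion is reported as -ETIME instead of silently falling out of the old do/while with 0. The repaired shape of such a loop (the completion check is illustrative):

    /* sketch of the corrected wait loop */
    for (;;) {
            prepare_to_wait(&q, &wait, TASK_INTERRUPTIBLE);
            if (request_completed())      /* the real exit condition */
                    break;
            if (!timeout) {               /* budget exhausted */
                    timeout = -ETIME;
                    break;
            }
            timeout = io_schedule_timeout(timeout);
    }
    finish_wait(&q, &wait);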
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index ec7c5d80fe4f..9673bcc3b6ad 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c | |||
| @@ -405,6 +405,11 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv) | |||
| 405 | 405 | ||
| 406 | mutex_init(&dev_priv->mm.stolen_lock); | 406 | mutex_init(&dev_priv->mm.stolen_lock); |
| 407 | 407 | ||
| 408 | if (intel_vgpu_active(dev_priv)) { | ||
| 409 | DRM_INFO("iGVT-g active, disabling use of stolen memory\n"); | ||
| 410 | return 0; | ||
| 411 | } | ||
| 412 | |||
| 408 | #ifdef CONFIG_INTEL_IOMMU | 413 | #ifdef CONFIG_INTEL_IOMMU |
| 409 | if (intel_iommu_gfx_mapped && INTEL_GEN(dev_priv) < 8) { | 414 | if (intel_iommu_gfx_mapped && INTEL_GEN(dev_priv) < 8) { |
| 410 | DRM_INFO("DMAR active, disabling use of stolen memory\n"); | 415 | DRM_INFO("DMAR active, disabling use of stolen memory\n"); |
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index b1361cfd4c5c..974ac08df473 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
| @@ -173,7 +173,7 @@ i915_tiling_ok(struct drm_i915_gem_object *obj, | |||
| 173 | else | 173 | else |
| 174 | tile_width = 512; | 174 | tile_width = 512; |
| 175 | 175 | ||
| 176 | if (!IS_ALIGNED(stride, tile_width)) | 176 | if (!stride || !IS_ALIGNED(stride, tile_width)) |
| 177 | return false; | 177 | return false; |
| 178 | 178 | ||
| 179 | /* 965+ just needs multiples of tile width */ | 179 | /* 965+ just needs multiples of tile width */ |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index ee313247673b..53bb7de6020d 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
| @@ -3123,19 +3123,16 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) | |||
| 3123 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); | 3123 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); |
| 3124 | } | 3124 | } |
| 3125 | 3125 | ||
| 3126 | static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) | 3126 | static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) |
| 3127 | { | 3127 | { |
| 3128 | u32 hotplug_irqs, hotplug, enabled_irqs; | 3128 | u32 hotplug; |
| 3129 | |||
| 3130 | hotplug_irqs = SDE_HOTPLUG_MASK_SPT; | ||
| 3131 | enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); | ||
| 3132 | |||
| 3133 | ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); | ||
| 3134 | 3129 | ||
| 3135 | /* Enable digital hotplug on the PCH */ | 3130 | /* Enable digital hotplug on the PCH */ |
| 3136 | hotplug = I915_READ(PCH_PORT_HOTPLUG); | 3131 | hotplug = I915_READ(PCH_PORT_HOTPLUG); |
| 3137 | hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE | | 3132 | hotplug |= PORTA_HOTPLUG_ENABLE | |
| 3138 | PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE; | 3133 | PORTB_HOTPLUG_ENABLE | |
| 3134 | PORTC_HOTPLUG_ENABLE | | ||
| 3135 | PORTD_HOTPLUG_ENABLE; | ||
| 3139 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); | 3136 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); |
| 3140 | 3137 | ||
| 3141 | hotplug = I915_READ(PCH_PORT_HOTPLUG2); | 3138 | hotplug = I915_READ(PCH_PORT_HOTPLUG2); |
| @@ -3143,6 +3140,18 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) | |||
| 3143 | I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); | 3140 | I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); |
| 3144 | } | 3141 | } |
| 3145 | 3142 | ||
| 3143 | static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) | ||
| 3144 | { | ||
| 3145 | u32 hotplug_irqs, enabled_irqs; | ||
| 3146 | |||
| 3147 | hotplug_irqs = SDE_HOTPLUG_MASK_SPT; | ||
| 3148 | enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); | ||
| 3149 | |||
| 3150 | ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); | ||
| 3151 | |||
| 3152 | spt_hpd_detection_setup(dev_priv); | ||
| 3153 | } | ||
| 3154 | |||
| 3146 | static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) | 3155 | static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) |
| 3147 | { | 3156 | { |
| 3148 | u32 hotplug_irqs, hotplug, enabled_irqs; | 3157 | u32 hotplug_irqs, hotplug, enabled_irqs; |
| @@ -3177,18 +3186,15 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) | |||
| 3177 | ibx_hpd_irq_setup(dev_priv); | 3186 | ibx_hpd_irq_setup(dev_priv); |
| 3178 | } | 3187 | } |
| 3179 | 3188 | ||
| 3180 | static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) | 3189 | static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, |
| 3190 | u32 enabled_irqs) | ||
| 3181 | { | 3191 | { |
| 3182 | u32 hotplug_irqs, hotplug, enabled_irqs; | 3192 | u32 hotplug; |
| 3183 | |||
| 3184 | enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); | ||
| 3185 | hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; | ||
| 3186 | |||
| 3187 | bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); | ||
| 3188 | 3193 | ||
| 3189 | hotplug = I915_READ(PCH_PORT_HOTPLUG); | 3194 | hotplug = I915_READ(PCH_PORT_HOTPLUG); |
| 3190 | hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE | | 3195 | hotplug |= PORTA_HOTPLUG_ENABLE | |
| 3191 | PORTA_HOTPLUG_ENABLE; | 3196 | PORTB_HOTPLUG_ENABLE | |
| 3197 | PORTC_HOTPLUG_ENABLE; | ||
| 3192 | 3198 | ||
| 3193 | DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", | 3199 | DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", |
| 3194 | hotplug, enabled_irqs); | 3200 | hotplug, enabled_irqs); |
| @@ -3198,7 +3204,6 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) | |||
| 3198 | * For BXT invert bit has to be set based on AOB design | 3204 | * For BXT invert bit has to be set based on AOB design |
| 3199 | * for HPD detection logic, update it based on VBT fields. | 3205 | * for HPD detection logic, update it based on VBT fields. |
| 3200 | */ | 3206 | */ |
| 3201 | |||
| 3202 | if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && | 3207 | if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && |
| 3203 | intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) | 3208 | intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) |
| 3204 | hotplug |= BXT_DDIA_HPD_INVERT; | 3209 | hotplug |= BXT_DDIA_HPD_INVERT; |
| @@ -3212,6 +3217,23 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) | |||
| 3212 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); | 3217 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); |
| 3213 | } | 3218 | } |
| 3214 | 3219 | ||
| 3220 | static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) | ||
| 3221 | { | ||
| 3222 | __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK); | ||
| 3223 | } | ||
| 3224 | |||
| 3225 | static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) | ||
| 3226 | { | ||
| 3227 | u32 hotplug_irqs, enabled_irqs; | ||
| 3228 | |||
| 3229 | enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); | ||
| 3230 | hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; | ||
| 3231 | |||
| 3232 | bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); | ||
| 3233 | |||
| 3234 | __bxt_hpd_detection_setup(dev_priv, enabled_irqs); | ||
| 3235 | } | ||
| 3236 | |||
| 3215 | static void ibx_irq_postinstall(struct drm_device *dev) | 3237 | static void ibx_irq_postinstall(struct drm_device *dev) |
| 3216 | { | 3238 | { |
| 3217 | struct drm_i915_private *dev_priv = to_i915(dev); | 3239 | struct drm_i915_private *dev_priv = to_i915(dev); |
| @@ -3227,6 +3249,12 @@ static void ibx_irq_postinstall(struct drm_device *dev) | |||
| 3227 | 3249 | ||
| 3228 | gen5_assert_iir_is_zero(dev_priv, SDEIIR); | 3250 | gen5_assert_iir_is_zero(dev_priv, SDEIIR); |
| 3229 | I915_WRITE(SDEIMR, ~mask); | 3251 | I915_WRITE(SDEIMR, ~mask); |
| 3252 | |||
| 3253 | if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || | ||
| 3254 | HAS_PCH_LPT(dev_priv)) | ||
| 3255 | ; /* TODO: Enable HPD detection on older PCH platforms too */ | ||
| 3256 | else | ||
| 3257 | spt_hpd_detection_setup(dev_priv); | ||
| 3230 | } | 3258 | } |
| 3231 | 3259 | ||
| 3232 | static void gen5_gt_irq_postinstall(struct drm_device *dev) | 3260 | static void gen5_gt_irq_postinstall(struct drm_device *dev) |
| @@ -3438,6 +3466,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) | |||
| 3438 | 3466 | ||
| 3439 | GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); | 3467 | GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); |
| 3440 | GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); | 3468 | GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); |
| 3469 | |||
| 3470 | if (IS_GEN9_LP(dev_priv)) | ||
| 3471 | bxt_hpd_detection_setup(dev_priv); | ||
| 3441 | } | 3472 | } |
| 3442 | 3473 | ||
| 3443 | static int gen8_irq_postinstall(struct drm_device *dev) | 3474 | static int gen8_irq_postinstall(struct drm_device *dev) |
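Note on the i915_irq.c hunks above: the hotplug-detection programming is split out of the IRQ-setup functions into spt_hpd_detection_setup()/bxt_hpd_detection_setup(), so the postinstall paths can re-program the detection enable bits after an interrupt reset, even before any connector has requested hotplug IRQs. __bxt_hpd_detection_setup() takes the enabled-IRQ set as a parameter so the two callers can pass either the live set or the full BXT_DE_PORT_HOTPLUG_MASK. A compact, compilable sketch of this "shared helper, two call sites" shape (the bit values here are placeholders, not the real register bits):

#include <stdio.h>
#include <stdint.h>

#define HPD_PORT_A (1u << 0)   /* placeholder irq bits */
#define HPD_PORT_B (1u << 1)
#define HPD_PORT_C (1u << 2)
#define HPD_ALL    (HPD_PORT_A | HPD_PORT_B | HPD_PORT_C)

static void __hpd_detection_setup(uint32_t enabled_irqs)
{
	printf("program detection bits for irqs 0x%x\n", enabled_irqs);
}

/* irq-setup path: uses the irqs the connectors actually enabled */
static void hpd_irq_setup(uint32_t enabled_irqs)
{
	__hpd_detection_setup(enabled_irqs);
}

/* postinstall path: restores detection for everything after a reset */
static void irq_postinstall(void)
{
	__hpd_detection_setup(HPD_ALL);
}

int main(void)
{
	hpd_irq_setup(HPD_PORT_A);
	irq_postinstall();
	return 0;
}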
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 72f9f36ae5ce..675323189f2c 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
| @@ -3307,8 +3307,10 @@ enum skl_disp_power_wells { | |||
| 3307 | /* | 3307 | /* |
| 3308 | * Logical Context regs | 3308 | * Logical Context regs |
| 3309 | */ | 3309 | */ |
| 3310 | #define CCID _MMIO(0x2180) | 3310 | #define CCID _MMIO(0x2180) |
| 3311 | #define CCID_EN (1<<0) | 3311 | #define CCID_EN BIT(0) |
| 3312 | #define CCID_EXTENDED_STATE_RESTORE BIT(2) | ||
| 3313 | #define CCID_EXTENDED_STATE_SAVE BIT(3) | ||
| 3312 | /* | 3314 | /* |
| 3313 | * Notes on SNB/IVB/VLV context size: | 3315 | * Notes on SNB/IVB/VLV context size: |
| 3314 | * - Power context is saved elsewhere (LLC or stolen) | 3316 | * - Power context is saved elsewhere (LLC or stolen) |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 385e29af8baa..2bf5aca6e37c 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
| @@ -499,6 +499,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) | |||
| 499 | struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev); | 499 | struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev); |
| 500 | struct edid *edid; | 500 | struct edid *edid; |
| 501 | struct i2c_adapter *i2c; | 501 | struct i2c_adapter *i2c; |
| 502 | bool ret = false; | ||
| 502 | 503 | ||
| 503 | BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG); | 504 | BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG); |
| 504 | 505 | ||
| @@ -515,17 +516,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) | |||
| 515 | */ | 516 | */ |
| 516 | if (!is_digital) { | 517 | if (!is_digital) { |
| 517 | DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); | 518 | DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); |
| 518 | return true; | 519 | ret = true; |
| 520 | } else { | ||
| 521 | DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); | ||
| 519 | } | 522 | } |
| 520 | |||
| 521 | DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); | ||
| 522 | } else { | 523 | } else { |
| 523 | DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n"); | 524 | DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n"); |
| 524 | } | 525 | } |
| 525 | 526 | ||
| 526 | kfree(edid); | 527 | kfree(edid); |
| 527 | 528 | ||
| 528 | return false; | 529 | return ret; |
| 529 | } | 530 | } |
| 530 | 531 | ||
| 531 | static enum drm_connector_status | 532 | static enum drm_connector_status |
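Note on the intel_crt.c hunk above: intel_crt_detect_ddc() is converted from an early "return true" to a single exit through a ret variable, so the kfree(edid) at the bottom runs on every path; the old code returned before the kfree when an analog panel was detected via EDID, leaking the EDID buffer. A minimal, compilable sketch of the same single-exit pattern (the names here are illustrative, not the i915 ones):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Sketch: one exit point guarantees the buffer is always freed. */
static bool detect(bool analog)
{
	char *edid = malloc(128);   /* stands in for the fetched EDID */
	bool ret = false;

	if (edid && analog) {
		printf("detected via EDID\n");
		ret = true;         /* no early return: fall through to free */
	} else {
		printf("not detected\n");
	}

	free(edid);
	return ret;
}

int main(void)
{
	detect(true);
	detect(false);
	return 0;
}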
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 36ecc864e711..a2fece5e9fb3 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -2578,8 +2578,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, | |||
| 2578 | * We only keep the x/y offsets, so push all of the | 2578 | * We only keep the x/y offsets, so push all of the |
| 2579 | * gtt offset into the x/y offsets. | 2579 | * gtt offset into the x/y offsets. |
| 2580 | */ | 2580 | */ |
| 2581 | _intel_adjust_tile_offset(&x, &y, tile_size, | 2581 | _intel_adjust_tile_offset(&x, &y, |
| 2582 | tile_width, tile_height, pitch_tiles, | 2582 | tile_width, tile_height, |
| 2583 | tile_size, pitch_tiles, | ||
| 2583 | gtt_offset_rotated * tile_size, 0); | 2584 | gtt_offset_rotated * tile_size, 0); |
| 2584 | 2585 | ||
| 2585 | gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height; | 2586 | gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height; |
| @@ -4253,10 +4254,10 @@ static void page_flip_completed(struct intel_crtc *intel_crtc) | |||
| 4253 | drm_crtc_vblank_put(&intel_crtc->base); | 4254 | drm_crtc_vblank_put(&intel_crtc->base); |
| 4254 | 4255 | ||
| 4255 | wake_up_all(&dev_priv->pending_flip_queue); | 4256 | wake_up_all(&dev_priv->pending_flip_queue); |
| 4256 | queue_work(dev_priv->wq, &work->unpin_work); | ||
| 4257 | |||
| 4258 | trace_i915_flip_complete(intel_crtc->plane, | 4257 | trace_i915_flip_complete(intel_crtc->plane, |
| 4259 | work->pending_flip_obj); | 4258 | work->pending_flip_obj); |
| 4259 | |||
| 4260 | queue_work(dev_priv->wq, &work->unpin_work); | ||
| 4260 | } | 4261 | } |
| 4261 | 4262 | ||
| 4262 | static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) | 4263 | static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) |
| @@ -6882,6 +6883,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) | |||
| 6882 | } | 6883 | } |
| 6883 | 6884 | ||
| 6884 | state = drm_atomic_state_alloc(crtc->dev); | 6885 | state = drm_atomic_state_alloc(crtc->dev); |
| 6886 | if (!state) { | ||
| 6887 | DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory", | ||
| 6888 | crtc->base.id, crtc->name); | ||
| 6889 | return; | ||
| 6890 | } | ||
| 6891 | |||
| 6885 | state->acquire_ctx = crtc->dev->mode_config.acquire_ctx; | 6892 | state->acquire_ctx = crtc->dev->mode_config.acquire_ctx; |
| 6886 | 6893 | ||
| 6887 | /* Everything's already locked, -EDEADLK can't happen. */ | 6894 | /* Everything's already locked, -EDEADLK can't happen. */ |
| @@ -14563,8 +14570,14 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence, | |||
| 14563 | break; | 14570 | break; |
| 14564 | 14571 | ||
| 14565 | case FENCE_FREE: | 14572 | case FENCE_FREE: |
| 14566 | drm_atomic_state_put(&state->base); | 14573 | { |
| 14567 | break; | 14574 | struct intel_atomic_helper *helper = |
| 14575 | &to_i915(state->base.dev)->atomic_helper; | ||
| 14576 | |||
| 14577 | if (llist_add(&state->freed, &helper->free_list)) | ||
| 14578 | schedule_work(&helper->free_work); | ||
| 14579 | break; | ||
| 14580 | } | ||
| 14568 | } | 14581 | } |
| 14569 | 14582 | ||
| 14570 | return NOTIFY_DONE; | 14583 | return NOTIFY_DONE; |
| @@ -16587,6 +16600,18 @@ fail: | |||
| 16587 | drm_modeset_acquire_fini(&ctx); | 16600 | drm_modeset_acquire_fini(&ctx); |
| 16588 | } | 16601 | } |
| 16589 | 16602 | ||
| 16603 | static void intel_atomic_helper_free_state(struct work_struct *work) | ||
| 16604 | { | ||
| 16605 | struct drm_i915_private *dev_priv = | ||
| 16606 | container_of(work, typeof(*dev_priv), atomic_helper.free_work); | ||
| 16607 | struct intel_atomic_state *state, *next; | ||
| 16608 | struct llist_node *freed; | ||
| 16609 | |||
| 16610 | freed = llist_del_all(&dev_priv->atomic_helper.free_list); | ||
| 16611 | llist_for_each_entry_safe(state, next, freed, freed) | ||
| 16612 | drm_atomic_state_put(&state->base); | ||
| 16613 | } | ||
| 16614 | |||
| 16590 | int intel_modeset_init(struct drm_device *dev) | 16615 | int intel_modeset_init(struct drm_device *dev) |
| 16591 | { | 16616 | { |
| 16592 | struct drm_i915_private *dev_priv = to_i915(dev); | 16617 | struct drm_i915_private *dev_priv = to_i915(dev); |
| @@ -16606,6 +16631,9 @@ int intel_modeset_init(struct drm_device *dev) | |||
| 16606 | 16631 | ||
| 16607 | dev->mode_config.funcs = &intel_mode_funcs; | 16632 | dev->mode_config.funcs = &intel_mode_funcs; |
| 16608 | 16633 | ||
| 16634 | INIT_WORK(&dev_priv->atomic_helper.free_work, | ||
| 16635 | intel_atomic_helper_free_state); | ||
| 16636 | |||
| 16609 | intel_init_quirks(dev); | 16637 | intel_init_quirks(dev); |
| 16610 | 16638 | ||
| 16611 | intel_init_pm(dev_priv); | 16639 | intel_init_pm(dev_priv); |
| @@ -17263,6 +17291,9 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
| 17263 | { | 17291 | { |
| 17264 | struct drm_i915_private *dev_priv = to_i915(dev); | 17292 | struct drm_i915_private *dev_priv = to_i915(dev); |
| 17265 | 17293 | ||
| 17294 | flush_work(&dev_priv->atomic_helper.free_work); | ||
| 17295 | WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list)); | ||
| 17296 | |||
| 17266 | intel_disable_gt_powersave(dev_priv); | 17297 | intel_disable_gt_powersave(dev_priv); |
| 17267 | 17298 | ||
| 17268 | /* | 17299 | /* |
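Note on the intel_display.c changes above: freeing of atomic state is deferred from the fence callback to a workqueue. FENCE_FREE pushes the state onto a lock-free llist and schedules the worker only when llist_add() reports the list was previously empty; the worker grabs the whole list at once with llist_del_all() and drops the references, and intel_modeset_cleanup() flushes the work before teardown. A single-threaded, compilable analogue of that pattern (the kernel's llist_add()/llist_del_all() are atomic; this sketch is not):

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct node { struct node *next; int id; };
static struct node *free_list;

/* Returns true when the list was empty before the add -- the cue to
 * schedule the worker, exactly once per batch. */
static bool llist_add_sketch(struct node *n)
{
	n->next = free_list;
	free_list = n;
	return n->next == NULL;
}

static void free_work(void)
{
	struct node *n = free_list;

	free_list = NULL;            /* llist_del_all() analogue */
	while (n) {
		struct node *next = n->next;
		printf("freeing state %d\n", n->id);
		free(n);
		n = next;
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->id = i;
		if (llist_add_sketch(n))
			printf("first entry: schedule_work()\n");
	}
	free_work();
	return 0;
}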
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 3d8ac8aa7214..d1670b8afbf5 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -2887,6 +2887,9 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) | |||
| 2887 | 2887 | ||
| 2888 | WARN_ON(intel_dp->active_pipe != INVALID_PIPE); | 2888 | WARN_ON(intel_dp->active_pipe != INVALID_PIPE); |
| 2889 | 2889 | ||
| 2890 | if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) | ||
| 2891 | return; | ||
| 2892 | |||
| 2890 | edp_panel_vdd_off_sync(intel_dp); | 2893 | edp_panel_vdd_off_sync(intel_dp); |
| 2891 | 2894 | ||
| 2892 | /* | 2895 | /* |
| @@ -2914,9 +2917,6 @@ static void vlv_steal_power_sequencer(struct drm_device *dev, | |||
| 2914 | 2917 | ||
| 2915 | lockdep_assert_held(&dev_priv->pps_mutex); | 2918 | lockdep_assert_held(&dev_priv->pps_mutex); |
| 2916 | 2919 | ||
| 2917 | if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) | ||
| 2918 | return; | ||
| 2919 | |||
| 2920 | for_each_intel_encoder(dev, encoder) { | 2920 | for_each_intel_encoder(dev, encoder) { |
| 2921 | struct intel_dp *intel_dp; | 2921 | struct intel_dp *intel_dp; |
| 2922 | enum port port; | 2922 | enum port port; |
| @@ -4406,8 +4406,8 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv, | |||
| 4406 | * | 4406 | * |
| 4407 | * Return %true if @port is connected, %false otherwise. | 4407 | * Return %true if @port is connected, %false otherwise. |
| 4408 | */ | 4408 | */ |
| 4409 | static bool intel_digital_port_connected(struct drm_i915_private *dev_priv, | 4409 | bool intel_digital_port_connected(struct drm_i915_private *dev_priv, |
| 4410 | struct intel_digital_port *port) | 4410 | struct intel_digital_port *port) |
| 4411 | { | 4411 | { |
| 4412 | if (HAS_PCH_IBX(dev_priv)) | 4412 | if (HAS_PCH_IBX(dev_priv)) |
| 4413 | return ibx_digital_port_connected(dev_priv, port); | 4413 | return ibx_digital_port_connected(dev_priv, port); |
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index c92a2558beb4..e59e43a9f3a6 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c | |||
| @@ -1855,7 +1855,8 @@ bxt_get_dpll(struct intel_crtc *crtc, | |||
| 1855 | return NULL; | 1855 | return NULL; |
| 1856 | 1856 | ||
| 1857 | if ((encoder->type == INTEL_OUTPUT_DP || | 1857 | if ((encoder->type == INTEL_OUTPUT_DP || |
| 1858 | encoder->type == INTEL_OUTPUT_EDP) && | 1858 | encoder->type == INTEL_OUTPUT_EDP || |
| 1859 | encoder->type == INTEL_OUTPUT_DP_MST) && | ||
| 1859 | !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state)) | 1860 | !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state)) |
| 1860 | return NULL; | 1861 | return NULL; |
| 1861 | 1862 | ||
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 0cec0013ace0..40fed65a791d 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -371,6 +371,8 @@ struct intel_atomic_state { | |||
| 371 | struct skl_wm_values wm_results; | 371 | struct skl_wm_values wm_results; |
| 372 | 372 | ||
| 373 | struct i915_sw_fence commit_ready; | 373 | struct i915_sw_fence commit_ready; |
| 374 | |||
| 375 | struct llist_node freed; | ||
| 374 | }; | 376 | }; |
| 375 | 377 | ||
| 376 | struct intel_plane_state { | 378 | struct intel_plane_state { |
| @@ -1485,6 +1487,8 @@ bool __intel_dp_read_desc(struct intel_dp *intel_dp, | |||
| 1485 | bool intel_dp_read_desc(struct intel_dp *intel_dp); | 1487 | bool intel_dp_read_desc(struct intel_dp *intel_dp); |
| 1486 | int intel_dp_link_required(int pixel_clock, int bpp); | 1488 | int intel_dp_link_required(int pixel_clock, int bpp); |
| 1487 | int intel_dp_max_data_rate(int max_link_clock, int max_lanes); | 1489 | int intel_dp_max_data_rate(int max_link_clock, int max_lanes); |
| 1490 | bool intel_digital_port_connected(struct drm_i915_private *dev_priv, | ||
| 1491 | struct intel_digital_port *port); | ||
| 1488 | 1492 | ||
| 1489 | /* intel_dp_aux_backlight.c */ | 1493 | /* intel_dp_aux_backlight.c */ |
| 1490 | int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector); | 1494 | int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector); |
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index 290384e86c63..d23c0fcff751 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c | |||
| @@ -67,6 +67,11 @@ int intel_gvt_init(struct drm_i915_private *dev_priv) | |||
| 67 | return 0; | 67 | return 0; |
| 68 | } | 68 | } |
| 69 | 69 | ||
| 70 | if (intel_vgpu_active(dev_priv)) { | ||
| 71 | DRM_DEBUG_DRIVER("GVT-g is disabled for guest\n"); | ||
| 72 | goto bail; | ||
| 73 | } | ||
| 74 | |||
| 70 | if (!is_supported_device(dev_priv)) { | 75 | if (!is_supported_device(dev_priv)) { |
| 71 | DRM_DEBUG_DRIVER("Unsupported device. GVT-g is disabled\n"); | 76 | DRM_DEBUG_DRIVER("Unsupported device. GVT-g is disabled\n"); |
| 72 | goto bail; | 77 | goto bail; |
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 432ee495dec2..ebf8023d21e6 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
| @@ -360,7 +360,8 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state) | |||
| 360 | static u64 execlists_update_context(struct drm_i915_gem_request *rq) | 360 | static u64 execlists_update_context(struct drm_i915_gem_request *rq) |
| 361 | { | 361 | { |
| 362 | struct intel_context *ce = &rq->ctx->engine[rq->engine->id]; | 362 | struct intel_context *ce = &rq->ctx->engine[rq->engine->id]; |
| 363 | struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt; | 363 | struct i915_hw_ppgtt *ppgtt = |
| 364 | rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; | ||
| 364 | u32 *reg_state = ce->lrc_reg_state; | 365 | u32 *reg_state = ce->lrc_reg_state; |
| 365 | 366 | ||
| 366 | reg_state[CTX_RING_TAIL+1] = rq->tail; | 367 | reg_state[CTX_RING_TAIL+1] = rq->tail; |
| @@ -1389,7 +1390,20 @@ static void reset_common_ring(struct intel_engine_cs *engine, | |||
| 1389 | { | 1390 | { |
| 1390 | struct drm_i915_private *dev_priv = engine->i915; | 1391 | struct drm_i915_private *dev_priv = engine->i915; |
| 1391 | struct execlist_port *port = engine->execlist_port; | 1392 | struct execlist_port *port = engine->execlist_port; |
| 1392 | struct intel_context *ce = &request->ctx->engine[engine->id]; | 1393 | struct intel_context *ce; |
| 1394 | |||
| 1395 | /* If the request was innocent, we leave the request in the ELSP | ||
| 1396 | * and will try to replay it on restarting. The context image may | ||
| 1397 | * have been corrupted by the reset, in which case we may have | ||
| 1398 | * to service a new GPU hang, but more likely we can continue on | ||
| 1399 | * without impact. | ||
| 1400 | * | ||
| 1401 | * If the request was guilty, we presume the context is corrupt | ||
| 1402 | * and have to at least restore the RING register in the context | ||
| 1403 | * image back to the expected values to skip over the guilty request. | ||
| 1404 | */ | ||
| 1405 | if (!request || request->fence.error != -EIO) | ||
| 1406 | return; | ||
| 1393 | 1407 | ||
| 1394 | /* We want a simple context + ring to execute the breadcrumb update. | 1408 | /* We want a simple context + ring to execute the breadcrumb update. |
| 1395 | * We cannot rely on the context being intact across the GPU hang, | 1409 | * We cannot rely on the context being intact across the GPU hang, |
| @@ -1398,6 +1412,7 @@ static void reset_common_ring(struct intel_engine_cs *engine, | |||
| 1398 | * future request will be after userspace has had the opportunity | 1412 | * future request will be after userspace has had the opportunity |
| 1399 | * to recreate its own state. | 1413 | * to recreate its own state. |
| 1400 | */ | 1414 | */ |
| 1415 | ce = &request->ctx->engine[engine->id]; | ||
| 1401 | execlists_init_reg_state(ce->lrc_reg_state, | 1416 | execlists_init_reg_state(ce->lrc_reg_state, |
| 1402 | request->ctx, engine, ce->ring); | 1417 | request->ctx, engine, ce->ring); |
| 1403 | 1418 | ||
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index f6d4e6940257..c300647ef604 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c | |||
| @@ -158,6 +158,8 @@ static bool lspcon_probe(struct intel_lspcon *lspcon) | |||
| 158 | static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon) | 158 | static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon) |
| 159 | { | 159 | { |
| 160 | struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); | 160 | struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); |
| 161 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | ||
| 162 | struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); | ||
| 161 | unsigned long start = jiffies; | 163 | unsigned long start = jiffies; |
| 162 | 164 | ||
| 163 | if (!lspcon->desc_valid) | 165 | if (!lspcon->desc_valid) |
| @@ -173,7 +175,8 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon) | |||
| 173 | if (!__intel_dp_read_desc(intel_dp, &desc)) | 175 | if (!__intel_dp_read_desc(intel_dp, &desc)) |
| 174 | return; | 176 | return; |
| 175 | 177 | ||
| 176 | if (!memcmp(&intel_dp->desc, &desc, sizeof(desc))) { | 178 | if (intel_digital_port_connected(dev_priv, dig_port) && |
| 179 | !memcmp(&intel_dp->desc, &desc, sizeof(desc))) { | ||
| 177 | DRM_DEBUG_KMS("LSPCON recovering in PCON mode after %u ms\n", | 180 | DRM_DEBUG_KMS("LSPCON recovering in PCON mode after %u ms\n", |
| 178 | jiffies_to_msecs(jiffies - start)); | 181 | jiffies_to_msecs(jiffies - start)); |
| 179 | return; | 182 | return; |
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index f4429f67a4e3..4a862a358c70 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
| @@ -982,7 +982,18 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) | |||
| 982 | opregion->vbt_size = vbt_size; | 982 | opregion->vbt_size = vbt_size; |
| 983 | } else { | 983 | } else { |
| 984 | vbt = base + OPREGION_VBT_OFFSET; | 984 | vbt = base + OPREGION_VBT_OFFSET; |
| 985 | vbt_size = OPREGION_ASLE_EXT_OFFSET - OPREGION_VBT_OFFSET; | 985 | /* |
| 986 | * The VBT specification says that if the ASLE ext | ||
| 987 | * mailbox is not used its area is reserved, but | ||
| 988 | * on some CHT boards the VBT extends into the | ||
| 989 | * ASLE ext area. Allow this even though it is | ||
| 990 | * against the spec, so we do not end up rejecting | ||
| 991 | * the VBT on those boards (and end up not finding the | ||
| 992 | * LCD panel because of this). | ||
| 993 | */ | ||
| 994 | vbt_size = (mboxes & MBOX_ASLE_EXT) ? | ||
| 995 | OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE; | ||
| 996 | vbt_size -= OPREGION_VBT_OFFSET; | ||
| 986 | if (intel_bios_is_valid_vbt(vbt, vbt_size)) { | 997 | if (intel_bios_is_valid_vbt(vbt, vbt_size)) { |
| 987 | DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n"); | 998 | DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n"); |
| 988 | opregion->vbt = vbt; | 999 | opregion->vbt = vbt; |
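Note on the intel_opregion.c hunk above: the mailbox-#4 VBT is now sized by whether the ASLE-ext mailbox is advertised. If it is, the VBT may only extend up to OPREGION_ASLE_EXT_OFFSET; if not, it may run to the end of the OpRegion, which tolerates the out-of-spec CHT boards described in the comment. A compilable sketch of the computation (the offset/size constants below are placeholders, not necessarily the real i915 values):

#include <stdio.h>
#include <stdint.h>

/* Placeholder layout constants -- illustrative only. */
#define OPREGION_VBT_OFFSET       0x0400
#define OPREGION_ASLE_EXT_OFFSET  0x1c00
#define OPREGION_SIZE             0x2000
#define MBOX_ASLE_EXT             (1u << 4)

static uint32_t vbt_size(uint32_t mboxes)
{
	uint32_t size = (mboxes & MBOX_ASLE_EXT) ?
			OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
	return size - OPREGION_VBT_OFFSET;
}

int main(void)
{
	printf("ASLE ext present: vbt may be 0x%x bytes\n",
	       vbt_size(MBOX_ASLE_EXT));
	printf("ASLE ext absent:  vbt may be 0x%x bytes\n",
	       vbt_size(0));
	return 0;
}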
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 69035e4f9b3b..91bc4abf5d3e 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
| @@ -599,10 +599,62 @@ out: | |||
| 599 | static void reset_ring_common(struct intel_engine_cs *engine, | 599 | static void reset_ring_common(struct intel_engine_cs *engine, |
| 600 | struct drm_i915_gem_request *request) | 600 | struct drm_i915_gem_request *request) |
| 601 | { | 601 | { |
| 602 | struct intel_ring *ring = request->ring; | 602 | /* Try to restore the logical GPU state to match the continuation |
| 603 | * of the request queue. If we skip the context/PD restore, then | ||
| 604 | * the next request may try to execute assuming that its context | ||
| 605 | * is valid and loaded on the GPU and so may try to access invalid | ||
| 606 | * memory, prompting repeated GPU hangs. | ||
| 607 | * | ||
| 608 | * If the request was guilty, we still restore the logical state | ||
| 609 | * in case the next request requires it (e.g. the aliasing ppgtt), | ||
| 610 | * but skip over the hung batch. | ||
| 611 | * | ||
| 612 | * If the request was innocent, we try to replay the request with | ||
| 613 | * the restored context. | ||
| 614 | */ | ||
| 615 | if (request) { | ||
| 616 | struct drm_i915_private *dev_priv = request->i915; | ||
| 617 | struct intel_context *ce = &request->ctx->engine[engine->id]; | ||
| 618 | struct i915_hw_ppgtt *ppgtt; | ||
| 619 | |||
| 620 | /* FIXME consider gen8 reset */ | ||
| 621 | |||
| 622 | if (ce->state) { | ||
| 623 | I915_WRITE(CCID, | ||
| 624 | i915_ggtt_offset(ce->state) | | ||
| 625 | BIT(8) /* must be set! */ | | ||
| 626 | CCID_EXTENDED_STATE_SAVE | | ||
| 627 | CCID_EXTENDED_STATE_RESTORE | | ||
| 628 | CCID_EN); | ||
| 629 | } | ||
| 603 | 630 | ||
| 604 | ring->head = request->postfix; | 631 | ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt; |
| 605 | ring->last_retired_head = -1; | 632 | if (ppgtt) { |
| 633 | u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10; | ||
| 634 | |||
| 635 | I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G); | ||
| 636 | I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset); | ||
| 637 | |||
| 638 | /* Wait for the PD reload to complete */ | ||
| 639 | if (intel_wait_for_register(dev_priv, | ||
| 640 | RING_PP_DIR_BASE(engine), | ||
| 641 | BIT(0), 0, | ||
| 642 | 10)) | ||
| 643 | DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n"); | ||
| 644 | |||
| 645 | ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); | ||
| 646 | } | ||
| 647 | |||
| 648 | /* If the rq hung, jump to its breadcrumb and skip the batch */ | ||
| 649 | if (request->fence.error == -EIO) { | ||
| 650 | struct intel_ring *ring = request->ring; | ||
| 651 | |||
| 652 | ring->head = request->postfix; | ||
| 653 | ring->last_retired_head = -1; | ||
| 654 | } | ||
| 655 | } else { | ||
| 656 | engine->legacy_active_context = NULL; | ||
| 657 | } | ||
| 606 | } | 658 | } |
| 607 | 659 | ||
| 608 | static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) | 660 | static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) |
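Note on the reset_ring_common() rewrite above: this is the legacy-ring counterpart of the execlists reset path. It reloads the logical context through the CCID register (using the CCID_EXTENDED_STATE_SAVE/RESTORE bits added to i915_reg.h earlier in this diff), re-points the per-ring page-directory registers at the request's ppgtt (falling back to the aliasing ppgtt), waits for the PD reload, and only rewinds the ring head past a guilty batch. A small compilable sketch of how the CCID value is composed (the context address is illustrative; the "must be set" bit 8 is copied from the code above):

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))
#define CCID_EN                      BIT(0)
#define CCID_EXTENDED_STATE_RESTORE  BIT(2)
#define CCID_EXTENDED_STATE_SAVE     BIT(3)

int main(void)
{
	uint32_t ggtt_offset = 0x1000;   /* illustrative context address */
	uint32_t ccid = ggtt_offset |
			BIT(8) |         /* must be set, per the comment above */
			CCID_EXTENDED_STATE_SAVE |
			CCID_EXTENDED_STATE_RESTORE |
			CCID_EN;

	printf("CCID = 0x%08x\n", ccid);
	return 0;
}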
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 45dceb672e20..4b7b92a7bcf7 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c | |||
| @@ -255,8 +255,8 @@ static int imx_drm_bind(struct device *dev) | |||
| 255 | * this value would be used to check framebuffer size limitation | 255 | * this value would be used to check framebuffer size limitation |
| 256 | * at drm_mode_addfb(). | 256 | * at drm_mode_addfb(). |
| 257 | */ | 257 | */ |
| 258 | drm->mode_config.min_width = 64; | 258 | drm->mode_config.min_width = 1; |
| 259 | drm->mode_config.min_height = 64; | 259 | drm->mode_config.min_height = 1; |
| 260 | drm->mode_config.max_width = 4096; | 260 | drm->mode_config.max_width = 4096; |
| 261 | drm->mode_config.max_height = 4096; | 261 | drm->mode_config.max_height = 4096; |
| 262 | drm->mode_config.funcs = &imx_drm_mode_config_funcs; | 262 | drm->mode_config.funcs = &imx_drm_mode_config_funcs; |
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index 8f8aa4a63122..4826bb781723 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c | |||
| @@ -98,6 +98,8 @@ | |||
| 98 | /* TVE_TST_MODE_REG */ | 98 | /* TVE_TST_MODE_REG */ |
| 99 | #define TVE_TVDAC_TEST_MODE_MASK (0x7 << 0) | 99 | #define TVE_TVDAC_TEST_MODE_MASK (0x7 << 0) |
| 100 | 100 | ||
| 101 | #define IMX_TVE_DAC_VOLTAGE 2750000 | ||
| 102 | |||
| 101 | enum { | 103 | enum { |
| 102 | TVE_MODE_TVOUT, | 104 | TVE_MODE_TVOUT, |
| 103 | TVE_MODE_VGA, | 105 | TVE_MODE_VGA, |
| @@ -616,9 +618,8 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data) | |||
| 616 | 618 | ||
| 617 | tve->dac_reg = devm_regulator_get(dev, "dac"); | 619 | tve->dac_reg = devm_regulator_get(dev, "dac"); |
| 618 | if (!IS_ERR(tve->dac_reg)) { | 620 | if (!IS_ERR(tve->dac_reg)) { |
| 619 | ret = regulator_set_voltage(tve->dac_reg, 2750000, 2750000); | 621 | if (regulator_get_voltage(tve->dac_reg) != IMX_TVE_DAC_VOLTAGE) |
| 620 | if (ret) | 622 | dev_warn(dev, "dac voltage is not %d uV\n", IMX_TVE_DAC_VOLTAGE); |
| 621 | return ret; | ||
| 622 | ret = regulator_enable(tve->dac_reg); | 623 | ret = regulator_enable(tve->dac_reg); |
| 623 | if (ret) | 624 | if (ret) |
| 624 | return ret; | 625 | return ret; |
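Note on the imx-tve.c change above: the driver no longer calls regulator_set_voltage() on the "dac" supply, presumably because that call can fail on boards that wire a fixed (non-programmable) regulator; instead it reads the voltage back and only warns when it differs from the expected 2.75 V, letting the bind continue. A hedged sketch of the pattern with a stub standing in for the regulator API:

#include <stdio.h>

#define IMX_TVE_DAC_VOLTAGE 2750000

/* Stub standing in for regulator_get_voltage(); returns microvolts. */
static int regulator_get_voltage_stub(void)
{
	return 3300000;   /* pretend the board wired a fixed 3.3 V supply */
}

int main(void)
{
	/* Warn instead of failing: a fixed regulator cannot be
	 * re-programmed, so a hard error here would abort the bind on
	 * otherwise working boards. */
	if (regulator_get_voltage_stub() != IMX_TVE_DAC_VOLTAGE)
		fprintf(stderr, "warning: dac voltage is not %d uV\n",
			IMX_TVE_DAC_VOLTAGE);
	return 0;
}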
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 7f78da695dff..5b8e23d051f2 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig | |||
| @@ -72,3 +72,10 @@ config DRM_MSM_DSI_28NM_8960_PHY | |||
| 72 | help | 72 | help |
| 73 | Choose this option if the 28nm DSI PHY 8960 variant is used on the | 73 | Choose this option if the 28nm DSI PHY 8960 variant is used on the |
| 74 | platform. | 74 | platform. |
| 75 | |||
| 76 | config DRM_MSM_DSI_14NM_PHY | ||
| 77 | bool "Enable DSI 14nm PHY driver in MSM DRM (used by MSM8996/APQ8096)" | ||
| 78 | depends on DRM_MSM_DSI | ||
| 79 | default y | ||
| 80 | help | ||
| 81 | Choose this option if DSI PHY on 8996 is used on the platform. | ||
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 028c24df2291..39055362da95 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile | |||
| @@ -76,11 +76,13 @@ msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \ | |||
| 76 | msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o | 76 | msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o |
| 77 | msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o | 77 | msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o |
| 78 | msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o | 78 | msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o |
| 79 | msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o | ||
| 79 | 80 | ||
| 80 | ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y) | 81 | ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y) |
| 81 | msm-y += dsi/pll/dsi_pll.o | 82 | msm-y += dsi/pll/dsi_pll.o |
| 82 | msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o | 83 | msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o |
| 83 | msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o | 84 | msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o |
| 85 | msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o | ||
| 84 | endif | 86 | endif |
| 85 | 87 | ||
| 86 | obj-$(CONFIG_DRM_MSM) += msm.o | 88 | obj-$(CONFIG_DRM_MSM) += msm.o |
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index b8647198c11c..4414cf73735d 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | */ | 12 | */ |
| 13 | 13 | ||
| 14 | #include "msm_gem.h" | 14 | #include "msm_gem.h" |
| 15 | #include "msm_mmu.h" | ||
| 15 | #include "a5xx_gpu.h" | 16 | #include "a5xx_gpu.h" |
| 16 | 17 | ||
| 17 | extern bool hang_debug; | 18 | extern bool hang_debug; |
| @@ -327,7 +328,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu) | |||
| 327 | /* Enable RBBM error reporting bits */ | 328 | /* Enable RBBM error reporting bits */ |
| 328 | gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001); | 329 | gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001); |
| 329 | 330 | ||
| 330 | if (adreno_gpu->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) { | 331 | if (adreno_gpu->info->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) { |
| 331 | /* | 332 | /* |
| 332 | * Mask out the activity signals from RB1-3 to avoid false | 333 | * Mask out the activity signals from RB1-3 to avoid false |
| 333 | * positives | 334 | * positives |
| @@ -381,7 +382,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu) | |||
| 381 | 382 | ||
| 382 | gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22)); | 383 | gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22)); |
| 383 | 384 | ||
| 384 | if (adreno_gpu->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI) | 385 | if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI) |
| 385 | gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8)); | 386 | gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8)); |
| 386 | 387 | ||
| 387 | gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100); | 388 | gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100); |
| @@ -573,6 +574,19 @@ static bool a5xx_idle(struct msm_gpu *gpu) | |||
| 573 | return true; | 574 | return true; |
| 574 | } | 575 | } |
| 575 | 576 | ||
| 577 | static int a5xx_fault_handler(void *arg, unsigned long iova, int flags) | ||
| 578 | { | ||
| 579 | struct msm_gpu *gpu = arg; | ||
| 580 | pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n", | ||
| 581 | iova, flags, | ||
| 582 | gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(4)), | ||
| 583 | gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(5)), | ||
| 584 | gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)), | ||
| 585 | gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7))); | ||
| 586 | |||
| 587 | return -EFAULT; | ||
| 588 | } | ||
| 589 | |||
| 576 | static void a5xx_cp_err_irq(struct msm_gpu *gpu) | 590 | static void a5xx_cp_err_irq(struct msm_gpu *gpu) |
| 577 | { | 591 | { |
| 578 | u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS); | 592 | u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS); |
| @@ -884,5 +898,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) | |||
| 884 | return ERR_PTR(ret); | 898 | return ERR_PTR(ret); |
| 885 | } | 899 | } |
| 886 | 900 | ||
| 901 | if (gpu->aspace) | ||
| 902 | msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler); | ||
| 903 | |||
| 887 | return gpu; | 904 | return gpu; |
| 888 | } | 905 | } |
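Note on the a5xx_gpu.c changes above: an IOMMU fault handler is registered on the GPU's address space so page faults are rate-limit logged with the faulting IOVA plus four CP scratch registers for context, and returning -EFAULT reports the fault as unresolved. Also visible above: the A530 quirks now come from the per-chip adreno_info table rather than DT platform config. A minimal compilable sketch of the callback-registration pattern (the types and names below are illustrative, not the msm_mmu API):

#include <stdio.h>

typedef int (*fault_handler_t)(void *arg, unsigned long iova, int flags);

struct mmu {
	fault_handler_t handler;
	void *arg;
};

static void mmu_set_fault_handler(struct mmu *mmu, void *arg,
				  fault_handler_t handler)
{
	mmu->arg = arg;
	mmu->handler = handler;
}

/* Analogue of a5xx_fault_handler(): log and report "unhandled". */
static int gpu_fault(void *arg, unsigned long iova, int flags)
{
	printf("*** gpu fault: iova=%08lx, flags=%d\n", iova, flags);
	return -1;   /* like -EFAULT: the fault was not resolved */
}

int main(void)
{
	struct mmu mmu = { 0 };

	mmu_set_fault_handler(&mmu, NULL, gpu_fault);
	mmu.handler(mmu.arg, 0xdeadbeefUL, 0);   /* simulate a fault */
	return 0;
}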
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 893eb2b2531b..ece39b16a864 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c | |||
| @@ -75,12 +75,14 @@ static const struct adreno_info gpulist[] = { | |||
| 75 | .gmem = (SZ_1M + SZ_512K), | 75 | .gmem = (SZ_1M + SZ_512K), |
| 76 | .init = a4xx_gpu_init, | 76 | .init = a4xx_gpu_init, |
| 77 | }, { | 77 | }, { |
| 78 | .rev = ADRENO_REV(5, 3, 0, ANY_ID), | 78 | .rev = ADRENO_REV(5, 3, 0, 2), |
| 79 | .revn = 530, | 79 | .revn = 530, |
| 80 | .name = "A530", | 80 | .name = "A530", |
| 81 | .pm4fw = "a530_pm4.fw", | 81 | .pm4fw = "a530_pm4.fw", |
| 82 | .pfpfw = "a530_pfp.fw", | 82 | .pfpfw = "a530_pfp.fw", |
| 83 | .gmem = SZ_1M, | 83 | .gmem = SZ_1M, |
| 84 | .quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI | | ||
| 85 | ADRENO_QUIRK_FAULT_DETECT_MASK, | ||
| 84 | .init = a5xx_gpu_init, | 86 | .init = a5xx_gpu_init, |
| 85 | .gpmufw = "a530v3_gpmu.fw2", | 87 | .gpmufw = "a530v3_gpmu.fw2", |
| 86 | }, | 88 | }, |
| @@ -181,22 +183,51 @@ static void set_gpu_pdev(struct drm_device *dev, | |||
| 181 | priv->gpu_pdev = pdev; | 183 | priv->gpu_pdev = pdev; |
| 182 | } | 184 | } |
| 183 | 185 | ||
| 184 | static const struct { | 186 | static int find_chipid(struct device *dev, u32 *chipid) |
| 185 | const char *str; | 187 | { |
| 186 | uint32_t flag; | 188 | struct device_node *node = dev->of_node; |
| 187 | } quirks[] = { | 189 | const char *compat; |
| 188 | { "qcom,gpu-quirk-two-pass-use-wfi", ADRENO_QUIRK_TWO_PASS_USE_WFI }, | 190 | int ret; |
| 189 | { "qcom,gpu-quirk-fault-detect-mask", ADRENO_QUIRK_FAULT_DETECT_MASK }, | 191 | |
| 190 | }; | 192 | /* first search the compat strings for qcom,adreno-XYZ.W: */ |
| 193 | ret = of_property_read_string_index(node, "compatible", 0, &compat); | ||
| 194 | if (ret == 0) { | ||
| 195 | unsigned rev, patch; | ||
| 196 | |||
| 197 | if (sscanf(compat, "qcom,adreno-%u.%u", &rev, &patch) == 2) { | ||
| 198 | *chipid = 0; | ||
| 199 | *chipid |= (rev / 100) << 24; /* core */ | ||
| 200 | rev %= 100; | ||
| 201 | *chipid |= (rev / 10) << 16; /* major */ | ||
| 202 | rev %= 10; | ||
| 203 | *chipid |= rev << 8; /* minor */ | ||
| 204 | *chipid |= patch; | ||
| 205 | |||
| 206 | return 0; | ||
| 207 | } | ||
| 208 | } | ||
| 209 | |||
| 210 | /* and if that fails, fall back to legacy "qcom,chipid" property: */ | ||
| 211 | ret = of_property_read_u32(node, "qcom,chipid", chipid); | ||
| 212 | if (ret) | ||
| 213 | return ret; | ||
| 214 | |||
| 215 | dev_warn(dev, "Using legacy qcom,chipid binding!\n"); | ||
| 216 | dev_warn(dev, "Use compatible qcom,adreno-%u%u%u.%u instead.\n", | ||
| 217 | (*chipid >> 24) & 0xff, (*chipid >> 16) & 0xff, | ||
| 218 | (*chipid >> 8) & 0xff, *chipid & 0xff); | ||
| 219 | |||
| 220 | return 0; | ||
| 221 | } | ||
| 191 | 222 | ||
| 192 | static int adreno_bind(struct device *dev, struct device *master, void *data) | 223 | static int adreno_bind(struct device *dev, struct device *master, void *data) |
| 193 | { | 224 | { |
| 194 | static struct adreno_platform_config config = {}; | 225 | static struct adreno_platform_config config = {}; |
| 195 | struct device_node *child, *node = dev->of_node; | 226 | struct device_node *child, *node = dev->of_node; |
| 196 | u32 val; | 227 | u32 val; |
| 197 | int ret, i; | 228 | int ret; |
| 198 | 229 | ||
| 199 | ret = of_property_read_u32(node, "qcom,chipid", &val); | 230 | ret = find_chipid(dev, &val); |
| 200 | if (ret) { | 231 | if (ret) { |
| 201 | dev_err(dev, "could not find chipid: %d\n", ret); | 232 | dev_err(dev, "could not find chipid: %d\n", ret); |
| 202 | return ret; | 233 | return ret; |
| @@ -224,14 +255,12 @@ static int adreno_bind(struct device *dev, struct device *master, void *data) | |||
| 224 | } | 255 | } |
| 225 | 256 | ||
| 226 | if (!config.fast_rate) { | 257 | if (!config.fast_rate) { |
| 227 | dev_err(dev, "could not find clk rates\n"); | 258 | dev_warn(dev, "could not find clk rates\n"); |
| 228 | return -ENXIO; | 259 | /* This is a safe low speed for all devices: */ |
| 260 | config.fast_rate = 200000000; | ||
| 261 | config.slow_rate = 27000000; | ||
| 229 | } | 262 | } |
| 230 | 263 | ||
| 231 | for (i = 0; i < ARRAY_SIZE(quirks); i++) | ||
| 232 | if (of_property_read_bool(node, quirks[i].str)) | ||
| 233 | config.quirks |= quirks[i].flag; | ||
| 234 | |||
| 235 | dev->platform_data = &config; | 264 | dev->platform_data = &config; |
| 236 | set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev)); | 265 | set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev)); |
| 237 | return 0; | 266 | return 0; |
| @@ -260,6 +289,7 @@ static int adreno_remove(struct platform_device *pdev) | |||
| 260 | } | 289 | } |
| 261 | 290 | ||
| 262 | static const struct of_device_id dt_match[] = { | 291 | static const struct of_device_id dt_match[] = { |
| 292 | { .compatible = "qcom,adreno" }, | ||
| 263 | { .compatible = "qcom,adreno-3xx" }, | 293 | { .compatible = "qcom,adreno-3xx" }, |
| 264 | /* for backwards compat w/ downstream kgsl DT files: */ | 294 | /* for backwards compat w/ downstream kgsl DT files: */ |
| 265 | { .compatible = "qcom,kgsl-3d0" }, | 295 | { .compatible = "qcom,kgsl-3d0" }, |
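Note on find_chipid() above: the chip id is now derived from a "qcom,adreno-XYZ.W" compatible string, packing core/major/minor/patch into one byte each of a u32, with the legacy "qcom,chipid" property kept only as a warned-about fallback. The decimal-to-bytes packing is easy to verify in isolation; this standalone program reproduces it:

#include <stdio.h>
#include <stdint.h>

/* Same packing as find_chipid(): "qcom,adreno-530.2" -> 0x05030002. */
static int parse_chipid(const char *compat, uint32_t *chipid)
{
	unsigned rev, patch;

	if (sscanf(compat, "qcom,adreno-%u.%u", &rev, &patch) != 2)
		return -1;

	*chipid = 0;
	*chipid |= (rev / 100) << 24;   /* core  */
	rev %= 100;
	*chipid |= (rev / 10) << 16;    /* major */
	rev %= 10;
	*chipid |= rev << 8;            /* minor */
	*chipid |= patch;               /* patch */
	return 0;
}

int main(void)
{
	uint32_t chipid;

	if (parse_chipid("qcom,adreno-530.2", &chipid) == 0)
		printf("chipid = 0x%08x\n", chipid);   /* prints 0x05030002 */
	return 0;
}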
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 686a580c711a..c9bd1e6225f4 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c | |||
| @@ -352,7 +352,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
| 352 | adreno_gpu->gmem = adreno_gpu->info->gmem; | 352 | adreno_gpu->gmem = adreno_gpu->info->gmem; |
| 353 | adreno_gpu->revn = adreno_gpu->info->revn; | 353 | adreno_gpu->revn = adreno_gpu->info->revn; |
| 354 | adreno_gpu->rev = config->rev; | 354 | adreno_gpu->rev = config->rev; |
| 355 | adreno_gpu->quirks = config->quirks; | ||
| 356 | 355 | ||
| 357 | gpu->fast_rate = config->fast_rate; | 356 | gpu->fast_rate = config->fast_rate; |
| 358 | gpu->slow_rate = config->slow_rate; | 357 | gpu->slow_rate = config->slow_rate; |
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index e8d55b0306ed..42e444a67630 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h | |||
| @@ -75,6 +75,7 @@ struct adreno_info { | |||
| 75 | const char *pm4fw, *pfpfw; | 75 | const char *pm4fw, *pfpfw; |
| 76 | const char *gpmufw; | 76 | const char *gpmufw; |
| 77 | uint32_t gmem; | 77 | uint32_t gmem; |
| 78 | enum adreno_quirks quirks; | ||
| 78 | struct msm_gpu *(*init)(struct drm_device *dev); | 79 | struct msm_gpu *(*init)(struct drm_device *dev); |
| 79 | }; | 80 | }; |
| 80 | 81 | ||
| @@ -116,8 +117,6 @@ struct adreno_gpu { | |||
| 116 | * code (a3xx_gpu.c) and stored in this common location. | 117 | * code (a3xx_gpu.c) and stored in this common location. |
| 117 | */ | 118 | */ |
| 118 | const unsigned int *reg_offsets; | 119 | const unsigned int *reg_offsets; |
| 119 | |||
| 120 | uint32_t quirks; | ||
| 121 | }; | 120 | }; |
| 122 | #define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base) | 121 | #define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base) |
| 123 | 122 | ||
| @@ -128,7 +127,6 @@ struct adreno_platform_config { | |||
| 128 | #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING | 127 | #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING |
| 129 | struct msm_bus_scale_pdata *bus_scale_table; | 128 | struct msm_bus_scale_pdata *bus_scale_table; |
| 130 | #endif | 129 | #endif |
| 131 | uint32_t quirks; | ||
| 132 | }; | 130 | }; |
| 133 | 131 | ||
| 134 | #define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000) | 132 | #define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000) |
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index ec572f8389ed..311c1c1e7d6c 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c | |||
| @@ -18,9 +18,7 @@ struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi) | |||
| 18 | if (!msm_dsi || !msm_dsi_device_connected(msm_dsi)) | 18 | if (!msm_dsi || !msm_dsi_device_connected(msm_dsi)) |
| 19 | return NULL; | 19 | return NULL; |
| 20 | 20 | ||
| 21 | return (msm_dsi->device_flags & MIPI_DSI_MODE_VIDEO) ? | 21 | return msm_dsi->encoder; |
| 22 | msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID] : | ||
| 23 | msm_dsi->encoders[MSM_DSI_CMD_ENCODER_ID]; | ||
| 24 | } | 22 | } |
| 25 | 23 | ||
| 26 | static int dsi_get_phy(struct msm_dsi *msm_dsi) | 24 | static int dsi_get_phy(struct msm_dsi *msm_dsi) |
| @@ -187,14 +185,13 @@ void __exit msm_dsi_unregister(void) | |||
| 187 | } | 185 | } |
| 188 | 186 | ||
| 189 | int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, | 187 | int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, |
| 190 | struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]) | 188 | struct drm_encoder *encoder) |
| 191 | { | 189 | { |
| 192 | struct msm_drm_private *priv = dev->dev_private; | 190 | struct msm_drm_private *priv = dev->dev_private; |
| 193 | struct drm_bridge *ext_bridge; | 191 | struct drm_bridge *ext_bridge; |
| 194 | int ret, i; | 192 | int ret; |
| 195 | 193 | ||
| 196 | if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] || | 194 | if (WARN_ON(!encoder)) |
| 197 | !encoders[MSM_DSI_CMD_ENCODER_ID])) | ||
| 198 | return -EINVAL; | 195 | return -EINVAL; |
| 199 | 196 | ||
| 200 | msm_dsi->dev = dev; | 197 | msm_dsi->dev = dev; |
| @@ -205,6 +202,8 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, | |||
| 205 | goto fail; | 202 | goto fail; |
| 206 | } | 203 | } |
| 207 | 204 | ||
| 205 | msm_dsi->encoder = encoder; | ||
| 206 | |||
| 208 | msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id); | 207 | msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id); |
| 209 | if (IS_ERR(msm_dsi->bridge)) { | 208 | if (IS_ERR(msm_dsi->bridge)) { |
| 210 | ret = PTR_ERR(msm_dsi->bridge); | 209 | ret = PTR_ERR(msm_dsi->bridge); |
| @@ -213,11 +212,6 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, | |||
| 213 | goto fail; | 212 | goto fail; |
| 214 | } | 213 | } |
| 215 | 214 | ||
| 216 | for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) { | ||
| 217 | encoders[i]->bridge = msm_dsi->bridge; | ||
| 218 | msm_dsi->encoders[i] = encoders[i]; | ||
| 219 | } | ||
| 220 | |||
| 221 | /* | 215 | /* |
| 222 | * check if the dsi encoder output is connected to a panel or an | 216 | * check if the dsi encoder output is connected to a panel or an |
| 223 | * external bridge. We create a connector only if we're connected to a | 217 | * external bridge. We create a connector only if we're connected to a |
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index 03f115f532c2..32369975d155 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h | |||
| @@ -27,14 +27,24 @@ | |||
| 27 | #define DSI_1 1 | 27 | #define DSI_1 1 |
| 28 | #define DSI_MAX 2 | 28 | #define DSI_MAX 2 |
| 29 | 29 | ||
| 30 | struct msm_dsi_phy_shared_timings; | ||
| 31 | struct msm_dsi_phy_clk_request; | ||
| 32 | |||
| 30 | enum msm_dsi_phy_type { | 33 | enum msm_dsi_phy_type { |
| 31 | MSM_DSI_PHY_28NM_HPM, | 34 | MSM_DSI_PHY_28NM_HPM, |
| 32 | MSM_DSI_PHY_28NM_LP, | 35 | MSM_DSI_PHY_28NM_LP, |
| 33 | MSM_DSI_PHY_20NM, | 36 | MSM_DSI_PHY_20NM, |
| 34 | MSM_DSI_PHY_28NM_8960, | 37 | MSM_DSI_PHY_28NM_8960, |
| 38 | MSM_DSI_PHY_14NM, | ||
| 35 | MSM_DSI_PHY_MAX | 39 | MSM_DSI_PHY_MAX |
| 36 | }; | 40 | }; |
| 37 | 41 | ||
| 42 | enum msm_dsi_phy_usecase { | ||
| 43 | MSM_DSI_PHY_STANDALONE, | ||
| 44 | MSM_DSI_PHY_MASTER, | ||
| 45 | MSM_DSI_PHY_SLAVE, | ||
| 46 | }; | ||
| 47 | |||
| 38 | #define DSI_DEV_REGULATOR_MAX 8 | 48 | #define DSI_DEV_REGULATOR_MAX 8 |
| 39 | #define DSI_BUS_CLK_MAX 4 | 49 | #define DSI_BUS_CLK_MAX 4 |
| 40 | 50 | ||
| @@ -73,8 +83,8 @@ struct msm_dsi { | |||
| 73 | struct device *phy_dev; | 83 | struct device *phy_dev; |
| 74 | bool phy_enabled; | 84 | bool phy_enabled; |
| 75 | 85 | ||
| 76 | /* the encoders we are hooked to (outside of dsi block) */ | 86 | /* the encoder we are hooked to (outside of dsi block) */ |
| 77 | struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]; | 87 | struct drm_encoder *encoder; |
| 78 | 88 | ||
| 79 | int id; | 89 | int id; |
| 80 | }; | 90 | }; |
| @@ -84,12 +94,9 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id); | |||
| 84 | void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge); | 94 | void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge); |
| 85 | struct drm_connector *msm_dsi_manager_connector_init(u8 id); | 95 | struct drm_connector *msm_dsi_manager_connector_init(u8 id); |
| 86 | struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id); | 96 | struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id); |
| 87 | int msm_dsi_manager_phy_enable(int id, | ||
| 88 | const unsigned long bit_rate, const unsigned long esc_rate, | ||
| 89 | u32 *clk_pre, u32 *clk_post); | ||
| 90 | void msm_dsi_manager_phy_disable(int id); | ||
| 91 | int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg); | 97 | int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg); |
| 92 | bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len); | 98 | bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len); |
| 99 | void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags); | ||
| 93 | int msm_dsi_manager_register(struct msm_dsi *msm_dsi); | 100 | int msm_dsi_manager_register(struct msm_dsi *msm_dsi); |
| 94 | void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi); | 101 | void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi); |
| 95 | 102 | ||
| @@ -111,6 +118,8 @@ int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll, | |||
| 111 | struct clk **byte_clk_provider, struct clk **pixel_clk_provider); | 118 | struct clk **byte_clk_provider, struct clk **pixel_clk_provider); |
| 112 | void msm_dsi_pll_save_state(struct msm_dsi_pll *pll); | 119 | void msm_dsi_pll_save_state(struct msm_dsi_pll *pll); |
| 113 | int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll); | 120 | int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll); |
| 121 | int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll, | ||
| 122 | enum msm_dsi_phy_usecase uc); | ||
| 114 | #else | 123 | #else |
| 115 | static inline struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, | 124 | static inline struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, |
| 116 | enum msm_dsi_phy_type type, int id) { | 125 | enum msm_dsi_phy_type type, int id) { |
| @@ -131,6 +140,11 @@ static inline int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll) | |||
| 131 | { | 140 | { |
| 132 | return 0; | 141 | return 0; |
| 133 | } | 142 | } |
| 143 | static inline int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll, | ||
| 144 | enum msm_dsi_phy_usecase uc) | ||
| 145 | { | ||
| 146 | return -ENODEV; | ||
| 147 | } | ||
| 134 | #endif | 148 | #endif |
| 135 | 149 | ||
| 136 | /* dsi host */ | 150 | /* dsi host */ |
| @@ -146,7 +160,8 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, | |||
| 146 | u32 dma_base, u32 len); | 160 | u32 dma_base, u32 len); |
| 147 | int msm_dsi_host_enable(struct mipi_dsi_host *host); | 161 | int msm_dsi_host_enable(struct mipi_dsi_host *host); |
| 148 | int msm_dsi_host_disable(struct mipi_dsi_host *host); | 162 | int msm_dsi_host_disable(struct mipi_dsi_host *host); |
| 149 | int msm_dsi_host_power_on(struct mipi_dsi_host *host); | 163 | int msm_dsi_host_power_on(struct mipi_dsi_host *host, |
| 164 | struct msm_dsi_phy_shared_timings *phy_shared_timings); | ||
| 150 | int msm_dsi_host_power_off(struct mipi_dsi_host *host); | 165 | int msm_dsi_host_power_off(struct mipi_dsi_host *host); |
| 151 | int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, | 166 | int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, |
| 152 | struct drm_display_mode *mode); | 167 | struct drm_display_mode *mode); |
| @@ -157,6 +172,9 @@ int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer); | |||
| 157 | void msm_dsi_host_unregister(struct mipi_dsi_host *host); | 172 | void msm_dsi_host_unregister(struct mipi_dsi_host *host); |
| 158 | int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host, | 173 | int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host, |
| 159 | struct msm_dsi_pll *src_pll); | 174 | struct msm_dsi_pll *src_pll); |
| 175 | void msm_dsi_host_reset_phy(struct mipi_dsi_host *host); | ||
| 176 | void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host, | ||
| 177 | struct msm_dsi_phy_clk_request *clk_req); | ||
| 160 | void msm_dsi_host_destroy(struct mipi_dsi_host *host); | 178 | void msm_dsi_host_destroy(struct mipi_dsi_host *host); |
| 161 | int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, | 179 | int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, |
| 162 | struct drm_device *dev); | 180 | struct drm_device *dev); |
| @@ -164,14 +182,27 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi); | |||
| 164 | 182 | ||
| 165 | /* dsi phy */ | 183 | /* dsi phy */ |
| 166 | struct msm_dsi_phy; | 184 | struct msm_dsi_phy; |
| 185 | struct msm_dsi_phy_shared_timings { | ||
| 186 | u32 clk_post; | ||
| 187 | u32 clk_pre; | ||
| 188 | bool clk_pre_inc_by_2; | ||
| 189 | }; | ||
| 190 | |||
| 191 | struct msm_dsi_phy_clk_request { | ||
| 192 | unsigned long bitclk_rate; | ||
| 193 | unsigned long escclk_rate; | ||
| 194 | }; | ||
| 195 | |||
| 167 | void msm_dsi_phy_driver_register(void); | 196 | void msm_dsi_phy_driver_register(void); |
| 168 | void msm_dsi_phy_driver_unregister(void); | 197 | void msm_dsi_phy_driver_unregister(void); |
| 169 | int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, | 198 | int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, |
| 170 | const unsigned long bit_rate, const unsigned long esc_rate); | 199 | struct msm_dsi_phy_clk_request *clk_req); |
| 171 | void msm_dsi_phy_disable(struct msm_dsi_phy *phy); | 200 | void msm_dsi_phy_disable(struct msm_dsi_phy *phy); |
| 172 | void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy, | 201 | void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy, |
| 173 | u32 *clk_pre, u32 *clk_post); | 202 | struct msm_dsi_phy_shared_timings *shared_timing); |
| 174 | struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy); | 203 | struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy); |
| 204 | void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy, | ||
| 205 | enum msm_dsi_phy_usecase uc); | ||
| 175 | 206 | ||
| 176 | #endif /* __DSI_CONNECTOR_H__ */ | 207 | #endif /* __DSI_CONNECTOR_H__ */ |
| 177 | 208 | ||
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h index 39dff7d5e89b..b3d70ea42891 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.xml.h +++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h | |||
| @@ -8,19 +8,10 @@ http://github.com/freedreno/envytools/ | |||
| 8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
| 9 | 9 | ||
| 10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
| 11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) | 11 | - /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-01-11 05:19:19) |
| 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) | 12 | - /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54) |
| 13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) | 13 | |
| 14 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) | 14 | Copyright (C) 2013-2017 by the following authors: |
| 15 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08) | ||
| 16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) | ||
| 17 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) | ||
| 18 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) | ||
| 19 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) | ||
| 20 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18) | ||
| 21 | - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) | ||
| 22 | |||
| 23 | Copyright (C) 2013-2015 by the following authors: | ||
| 24 | - Rob Clark <robdclark@gmail.com> (robclark) | 15 | - Rob Clark <robdclark@gmail.com> (robclark) |
| 25 | - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) | 16 | - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) |
| 26 | 17 | ||
| @@ -1304,5 +1295,257 @@ static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val) | |||
| 1304 | 1295 | ||
| 1305 | #define REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018 | 1296 | #define REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018 |
| 1306 | 1297 | ||
| 1298 | #define REG_DSI_14nm_PHY_CMN_REVISION_ID0 0x00000000 | ||
| 1299 | |||
| 1300 | #define REG_DSI_14nm_PHY_CMN_REVISION_ID1 0x00000004 | ||
| 1301 | |||
| 1302 | #define REG_DSI_14nm_PHY_CMN_REVISION_ID2 0x00000008 | ||
| 1303 | |||
| 1304 | #define REG_DSI_14nm_PHY_CMN_REVISION_ID3 0x0000000c | ||
| 1305 | |||
| 1306 | #define REG_DSI_14nm_PHY_CMN_CLK_CFG0 0x00000010 | ||
| 1307 | #define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK 0x000000f0 | ||
| 1308 | #define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT 4 | ||
| 1309 | static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(uint32_t val) | ||
| 1310 | { | ||
| 1311 | return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK; | ||
| 1312 | } | ||
| 1313 | #define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK 0x000000f0 | ||
| 1314 | #define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT 4 | ||
| 1315 | static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(uint32_t val) | ||
| 1316 | { | ||
| 1317 | return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK; | ||
| 1318 | } | ||
| 1319 | |||
| 1320 | #define REG_DSI_14nm_PHY_CMN_CLK_CFG1 0x00000014 | ||
| 1321 | #define DSI_14nm_PHY_CMN_CLK_CFG1_DSICLK_SEL 0x00000001 | ||
| 1322 | |||
| 1323 | #define REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL 0x00000018 | ||
| 1324 | #define DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000004 | ||
| 1325 | |||
| 1326 | #define REG_DSI_14nm_PHY_CMN_CTRL_0 0x0000001c | ||
| 1327 | |||
| 1328 | #define REG_DSI_14nm_PHY_CMN_CTRL_1 0x00000020 | ||
| 1329 | |||
| 1330 | #define REG_DSI_14nm_PHY_CMN_HW_TRIGGER 0x00000024 | ||
| 1331 | |||
| 1332 | #define REG_DSI_14nm_PHY_CMN_SW_CFG0 0x00000028 | ||
| 1333 | |||
| 1334 | #define REG_DSI_14nm_PHY_CMN_SW_CFG1 0x0000002c | ||
| 1335 | |||
| 1336 | #define REG_DSI_14nm_PHY_CMN_SW_CFG2 0x00000030 | ||
| 1337 | |||
| 1338 | #define REG_DSI_14nm_PHY_CMN_HW_CFG0 0x00000034 | ||
| 1339 | |||
| 1340 | #define REG_DSI_14nm_PHY_CMN_HW_CFG1 0x00000038 | ||
| 1341 | |||
| 1342 | #define REG_DSI_14nm_PHY_CMN_HW_CFG2 0x0000003c | ||
| 1343 | |||
| 1344 | #define REG_DSI_14nm_PHY_CMN_HW_CFG3 0x00000040 | ||
| 1345 | |||
| 1346 | #define REG_DSI_14nm_PHY_CMN_HW_CFG4 0x00000044 | ||
| 1347 | |||
| 1348 | #define REG_DSI_14nm_PHY_CMN_PLL_CNTRL 0x00000048 | ||
| 1349 | #define DSI_14nm_PHY_CMN_PLL_CNTRL_PLL_START 0x00000001 | ||
| 1350 | |||
| 1351 | #define REG_DSI_14nm_PHY_CMN_LDO_CNTRL 0x0000004c | ||
| 1352 | #define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK 0x0000003f | ||
| 1353 | #define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT 0 | ||
| 1354 | static inline uint32_t DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(uint32_t val) | ||
| 1355 | { | ||
| 1356 | return ((val) << DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT) & DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK; | ||
| 1357 | } | ||
| 1358 | |||
| 1359 | static inline uint32_t REG_DSI_14nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; } | ||
| 1360 | |||
| 1361 | static inline uint32_t REG_DSI_14nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; } | ||
| 1362 | #define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK 0x000000c0 | ||
| 1363 | #define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT 6 | ||
| 1364 | static inline uint32_t DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(uint32_t val) | ||
| 1365 | { | ||
| 1366 | return ((val) << DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT) & DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK; | ||
| 1367 | } | ||
| 1368 | |||
| 1369 | static inline uint32_t REG_DSI_14nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; } | ||
| 1370 | #define DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN 0x00000001 | ||
| 1371 | |||
| 1372 | static inline uint32_t REG_DSI_14nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; } | ||
| 1373 | |||
| 1374 | static inline uint32_t REG_DSI_14nm_PHY_LN_CFG3(uint32_t i0) { return 0x0000000c + 0x80*i0; } | ||
| 1375 | |||
| 1376 | static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000010 + 0x80*i0; } | ||
| 1377 | |||
| 1378 | static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_STR(uint32_t i0) { return 0x00000014 + 0x80*i0; } | ||
| 1379 | |||
| 1380 | static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(uint32_t i0) { return 0x00000018 + 0x80*i0; } | ||
| 1381 | #define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff | ||
| 1382 | #define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT 0 | ||
| 1383 | static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(uint32_t val) | ||
| 1384 | { | ||
| 1385 | return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK; | ||
| 1386 | } | ||
| 1387 | |||
| 1388 | static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(uint32_t i0) { return 0x0000001c + 0x80*i0; } | ||
| 1389 | #define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff | ||
| 1390 | #define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT 0 | ||
| 1391 | static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(uint32_t val) | ||
| 1392 | { | ||
| 1393 | return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK; | ||
| 1394 | } | ||
| 1395 | |||
| 1396 | static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(uint32_t i0) { return 0x00000020 + 0x80*i0; } | ||
| 1397 | #define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff | ||
| 1398 | #define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT 0 | ||
| 1399 | static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(uint32_t val) | ||
| 1400 | { | ||
| 1401 | return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK; | ||
| 1402 | } | ||
| 1403 | |||
| 1404 | static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(uint32_t i0) { return 0x00000024 + 0x80*i0; } | ||
| 1405 | #define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff | ||
| 1406 | #define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT 0 | ||
| 1407 | static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(uint32_t val) | ||
| 1408 | { | ||
| 1409 | return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK; | ||
| 1410 | } | ||
| 1411 | |||
| 1412 | static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(uint32_t i0) { return 0x00000028 + 0x80*i0; } | ||
| 1413 | #define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff | ||
| 1414 | #define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT 0 | ||
| 1415 | static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(uint32_t val) | ||
| 1416 | { | ||
| 1417 | return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK; | ||
| 1418 | } | ||
| 1419 | |||
| 1420 | static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(uint32_t i0) { return 0x0000002c + 0x80*i0; } | ||
| 1421 | #define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK 0x00000007 | ||
| 1422 | #define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT 0 | ||
| 1423 | static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(uint32_t val) | ||
| 1424 | { | ||
| 1425 | return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK; | ||
| 1426 | } | ||
| 1427 | #define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK 0x00000070 | ||
| 1428 | #define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT 4 | ||
| 1429 | static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(uint32_t val) | ||
| 1430 | { | ||
| 1431 | return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK; | ||
| 1432 | } | ||
| 1433 | |||
| 1434 | static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(uint32_t i0) { return 0x00000030 + 0x80*i0; } | ||
| 1435 | #define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK 0x00000007 | ||
| 1436 | #define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT 0 | ||
| 1437 | static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(uint32_t val) | ||
| 1438 | { | ||
| 1439 | return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK; | ||
| 1440 | } | ||
| 1441 | |||
| 1442 | static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(uint32_t i0) { return 0x00000034 + 0x80*i0; } | ||
| 1443 | #define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff | ||
| 1444 | #define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0 | ||
| 1445 | static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(uint32_t val) | ||
| 1446 | { | ||
| 1447 | return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK; | ||
| 1448 | } | ||
| 1449 | |||
| 1450 | static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(uint32_t i0) { return 0x00000038 + 0x80*i0; } | ||
| 1451 | |||
| 1452 | static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(uint32_t i0) { return 0x0000003c + 0x80*i0; } | ||
| 1453 | |||
| 1454 | static inline uint32_t REG_DSI_14nm_PHY_LN_VREG_CNTRL(uint32_t i0) { return 0x00000064 + 0x80*i0; } | ||
| 1455 | |||
| 1456 | #define REG_DSI_14nm_PHY_PLL_IE_TRIM 0x00000000 | ||
| 1457 | |||
| 1458 | #define REG_DSI_14nm_PHY_PLL_IP_TRIM 0x00000004 | ||
| 1459 | |||
| 1460 | #define REG_DSI_14nm_PHY_PLL_IPTAT_TRIM 0x00000010 | ||
| 1461 | |||
| 1462 | #define REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN 0x0000001c | ||
| 1463 | |||
| 1464 | #define REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET 0x00000028 | ||
| 1465 | |||
| 1466 | #define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL 0x0000002c | ||
| 1467 | |||
| 1468 | #define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2 0x00000030 | ||
| 1469 | |||
| 1470 | #define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL3 0x00000034 | ||
| 1471 | |||
| 1472 | #define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL4 0x00000038 | ||
| 1473 | |||
| 1474 | #define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5 0x0000003c | ||
| 1475 | |||
| 1476 | #define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1 0x00000040 | ||
| 1477 | |||
| 1478 | #define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2 0x00000044 | ||
| 1479 | |||
| 1480 | #define REG_DSI_14nm_PHY_PLL_KVCO_COUNT1 0x00000048 | ||
| 1481 | |||
| 1482 | #define REG_DSI_14nm_PHY_PLL_KVCO_COUNT2 0x0000004c | ||
| 1483 | |||
| 1484 | #define REG_DSI_14nm_PHY_PLL_VREF_CFG1 0x0000005c | ||
| 1485 | |||
| 1486 | #define REG_DSI_14nm_PHY_PLL_KVCO_CODE 0x00000058 | ||
| 1487 | |||
| 1488 | #define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1 0x0000006c | ||
| 1489 | |||
| 1490 | #define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2 0x00000070 | ||
| 1491 | |||
| 1492 | #define REG_DSI_14nm_PHY_PLL_VCO_COUNT1 0x00000074 | ||
| 1493 | |||
| 1494 | #define REG_DSI_14nm_PHY_PLL_VCO_COUNT2 0x00000078 | ||
| 1495 | |||
| 1496 | #define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1 0x0000007c | ||
| 1497 | |||
| 1498 | #define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2 0x00000080 | ||
| 1499 | |||
| 1500 | #define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3 0x00000084 | ||
| 1501 | |||
| 1502 | #define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN 0x00000088 | ||
| 1503 | |||
| 1504 | #define REG_DSI_14nm_PHY_PLL_PLL_VCO_TUNE 0x0000008c | ||
| 1505 | |||
| 1506 | #define REG_DSI_14nm_PHY_PLL_DEC_START 0x00000090 | ||
| 1507 | |||
| 1508 | #define REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER 0x00000094 | ||
| 1509 | |||
| 1510 | #define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1 0x00000098 | ||
| 1511 | |||
| 1512 | #define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2 0x0000009c | ||
| 1513 | |||
| 1514 | #define REG_DSI_14nm_PHY_PLL_SSC_PER1 0x000000a0 | ||
| 1515 | |||
| 1516 | #define REG_DSI_14nm_PHY_PLL_SSC_PER2 0x000000a4 | ||
| 1517 | |||
| 1518 | #define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1 0x000000a8 | ||
| 1519 | |||
| 1520 | #define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2 0x000000ac | ||
| 1521 | |||
| 1522 | #define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1 0x000000b4 | ||
| 1523 | |||
| 1524 | #define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2 0x000000b8 | ||
| 1525 | |||
| 1526 | #define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3 0x000000bc | ||
| 1527 | |||
| 1528 | #define REG_DSI_14nm_PHY_PLL_TXCLK_EN 0x000000c0 | ||
| 1529 | |||
| 1530 | #define REG_DSI_14nm_PHY_PLL_PLL_CRCTRL 0x000000c4 | ||
| 1531 | |||
| 1532 | #define REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS 0x000000cc | ||
| 1533 | |||
| 1534 | #define REG_DSI_14nm_PHY_PLL_PLL_MISC1 0x000000e8 | ||
| 1535 | |||
| 1536 | #define REG_DSI_14nm_PHY_PLL_CP_SET_CUR 0x000000f0 | ||
| 1537 | |||
| 1538 | #define REG_DSI_14nm_PHY_PLL_PLL_ICPMSET 0x000000f4 | ||
| 1539 | |||
| 1540 | #define REG_DSI_14nm_PHY_PLL_PLL_ICPCSET 0x000000f8 | ||
| 1541 | |||
| 1542 | #define REG_DSI_14nm_PHY_PLL_PLL_ICP_SET 0x000000fc | ||
| 1543 | |||
| 1544 | #define REG_DSI_14nm_PHY_PLL_PLL_LPF1 0x00000100 | ||
| 1545 | |||
| 1546 | #define REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV 0x00000104 | ||
| 1547 | |||
| 1548 | #define REG_DSI_14nm_PHY_PLL_PLL_BANDGAP 0x00000108 | ||
| 1549 | |||
| 1307 | 1550 | ||
| 1308 | #endif /* DSI_XML */ | 1551 | #endif /* DSI_XML */ |
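A note on the generated helpers above: the __MASK/__SHIFT pairs and their inline packers are meant to be OR-ed together when composing a register value. A minimal sketch, assuming a hypothetical ioremapped base pointer and divider values (div_lo, div_hi, and base are illustrative, not from the patch):

	u32 data = DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(div_lo) |
		   DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(div_hi);
	writel(data, base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);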
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c index 63436d8ee470..a5d75c9b3a73 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c | |||
| @@ -94,6 +94,30 @@ static const struct msm_dsi_config msm8994_dsi_cfg = { | |||
| 94 | .num_dsi = 2, | 94 | .num_dsi = 2, |
| 95 | }; | 95 | }; |
| 96 | 96 | ||
| 97 | /* | ||
| 98 | * TODO: core_mmss_clk fails to enable for some reason, but things work fine | ||
| 99 | * without it. Figure out why it doesn't enable and uncomment it below. | ||
| 100 | */ | ||
| 101 | static const char * const dsi_8996_bus_clk_names[] = { | ||
| 102 | "mdp_core_clk", "iface_clk", "bus_clk", /* "core_mmss_clk", */ | ||
| 103 | }; | ||
| 104 | |||
| 105 | static const struct msm_dsi_config msm8996_dsi_cfg = { | ||
| 106 | .io_offset = DSI_6G_REG_SHIFT, | ||
| 107 | .reg_cfg = { | ||
| 108 | .num = 2, | ||
| 109 | .regs = { | ||
| 110 | {"vdda", 18160, 1 }, /* 1.25 V */ | ||
| 111 | {"vcca", 17000, 32 }, /* 0.925 V */ | ||
| 112 | {"vddio", 100000, 100 },/* 1.8 V */ | ||
| 113 | }, | ||
| 114 | }, | ||
| 115 | .bus_clk_names = dsi_8996_bus_clk_names, | ||
| 116 | .num_bus_clks = ARRAY_SIZE(dsi_8996_bus_clk_names), | ||
| 117 | .io_start = { 0x994000, 0x996000 }, | ||
| 118 | .num_dsi = 2, | ||
| 119 | }; | ||
| 120 | |||
| 97 | static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { | 121 | static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { |
| 98 | {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, &apq8064_dsi_cfg}, | 122 | {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, &apq8064_dsi_cfg}, |
| 99 | {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0, | 123 | {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0, |
| @@ -106,6 +130,7 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { | |||
| 106 | &msm8974_apq8084_dsi_cfg}, | 130 | &msm8974_apq8084_dsi_cfg}, |
| 107 | {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg}, | 131 | {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg}, |
| 108 | {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg}, | 132 | {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg}, |
| 133 | {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1, &msm8996_dsi_cfg}, | ||
| 109 | }; | 134 | }; |
| 110 | 135 | ||
| 111 | const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor) | 136 | const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor) |
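msm_dsi_cfg_get() walks the dsi_cfg_handlers[] table above and returns the entry matching the controller's major/minor version, so a host on 8996 hardware would resolve its config roughly as follows (sketch; the surrounding probe code and error handling are abbreviated):

	const struct msm_dsi_cfg_handler *cfg_hnd;

	cfg_hnd = msm_dsi_cfg_get(MSM_DSI_VER_MAJOR_6G,
				  MSM_DSI_6G_VER_MINOR_V1_4_1);
	if (!cfg_hnd)
		return -EINVAL;	/* unsupported DSI controller version */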
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h index eeacc3232494..00a5da2663c6 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000 | 24 | #define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000 |
| 25 | #define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000 | 25 | #define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000 |
| 26 | #define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001 | 26 | #define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001 |
| 27 | #define MSM_DSI_6G_VER_MINOR_V1_4_1 0x10040001 | ||
| 27 | 28 | ||
| 28 | #define MSM_DSI_V2_VER_MINOR_8064 0x0 | 29 | #define MSM_DSI_V2_VER_MINOR_8064 0x0 |
| 29 | 30 | ||
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index c96e270361b0..4f79b109173d 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c | |||
| @@ -691,17 +691,6 @@ static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host) | |||
| 691 | return 0; | 691 | return 0; |
| 692 | } | 692 | } |
| 693 | 693 | ||
| 694 | static void dsi_phy_sw_reset(struct msm_dsi_host *msm_host) | ||
| 695 | { | ||
| 696 | DBG(""); | ||
| 697 | dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET); | ||
| 698 | /* Make sure fully reset */ | ||
| 699 | wmb(); | ||
| 700 | udelay(1000); | ||
| 701 | dsi_write(msm_host, REG_DSI_PHY_RESET, 0); | ||
| 702 | udelay(100); | ||
| 703 | } | ||
| 704 | |||
| 705 | static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable) | 694 | static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable) |
| 706 | { | 695 | { |
| 707 | u32 intr; | 696 | u32 intr; |
| @@ -756,7 +745,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt( | |||
| 756 | } | 745 | } |
| 757 | 746 | ||
| 758 | static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, | 747 | static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, |
| 759 | u32 clk_pre, u32 clk_post) | 748 | struct msm_dsi_phy_shared_timings *phy_shared_timings) |
| 760 | { | 749 | { |
| 761 | u32 flags = msm_host->mode_flags; | 750 | u32 flags = msm_host->mode_flags; |
| 762 | enum mipi_dsi_pixel_format mipi_fmt = msm_host->format; | 751 | enum mipi_dsi_pixel_format mipi_fmt = msm_host->format; |
| @@ -819,10 +808,16 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, | |||
| 819 | data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME; | 808 | data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME; |
| 820 | dsi_write(msm_host, REG_DSI_TRIG_CTRL, data); | 809 | dsi_write(msm_host, REG_DSI_TRIG_CTRL, data); |
| 821 | 810 | ||
| 822 | data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(clk_post) | | 811 | data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(phy_shared_timings->clk_post) | |
| 823 | DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(clk_pre); | 812 | DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(phy_shared_timings->clk_pre); |
| 824 | dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data); | 813 | dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data); |
| 825 | 814 | ||
| 815 | if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) && | ||
| 816 | (cfg_hnd->minor > MSM_DSI_6G_VER_MINOR_V1_0) && | ||
| 817 | phy_shared_timings->clk_pre_inc_by_2) | ||
| 818 | dsi_write(msm_host, REG_DSI_T_CLK_PRE_EXTEND, | ||
| 819 | DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK); | ||
| 820 | |||
| 826 | data = 0; | 821 | data = 0; |
| 827 | if (!(flags & MIPI_DSI_MODE_EOT_PACKET)) | 822 | if (!(flags & MIPI_DSI_MODE_EOT_PACKET)) |
| 828 | data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND; | 823 | data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND; |
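The T_CLK_PRE_EXTEND write above covers the case where the computed clk_pre value overflows its register field: the PHY timing code (see the clk_pre_inc_by_2 handling in the dsi_phy.c hunks below) then stores half the value and sets the flag, and DSI 6G v1.1+ controllers stretch each unit back to two byte clocks. A worked example with hypothetical numbers:

	/* if the D-PHY math yields clk_pre = 75 byte clocks (too large for
	 * the field), the PHY stores 75 >> 1 = 37 with clk_pre_inc_by_2 set,
	 * and this write restores an effective 74-byte-clock interval */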
| @@ -1482,6 +1477,8 @@ static int dsi_host_attach(struct mipi_dsi_host *host, | |||
| 1482 | msm_host->format = dsi->format; | 1477 | msm_host->format = dsi->format; |
| 1483 | msm_host->mode_flags = dsi->mode_flags; | 1478 | msm_host->mode_flags = dsi->mode_flags; |
| 1484 | 1479 | ||
| 1480 | msm_dsi_manager_attach_dsi_device(msm_host->id, dsi->mode_flags); | ||
| 1481 | |||
| 1485 | /* Some gpios defined in panel DT need to be controlled by host */ | 1482 | /* Some gpios defined in panel DT need to be controlled by host */ |
| 1486 | ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev); | 1483 | ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev); |
| 1487 | if (ret) | 1484 | if (ret) |
| @@ -1557,8 +1554,9 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host, | |||
| 1557 | 1554 | ||
| 1558 | prop = of_find_property(ep, "data-lanes", &len); | 1555 | prop = of_find_property(ep, "data-lanes", &len); |
| 1559 | if (!prop) { | 1556 | if (!prop) { |
| 1560 | dev_dbg(dev, "failed to find data lane mapping\n"); | 1557 | dev_dbg(dev, |
| 1561 | return -EINVAL; | 1558 | "failed to find data lane mapping, using default\n"); |
| 1559 | return 0; | ||
| 1562 | } | 1560 | } |
| 1563 | 1561 | ||
| 1564 | num_lanes = len / sizeof(u32); | 1562 | num_lanes = len / sizeof(u32); |
| @@ -1615,7 +1613,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host) | |||
| 1615 | struct device *dev = &msm_host->pdev->dev; | 1613 | struct device *dev = &msm_host->pdev->dev; |
| 1616 | struct device_node *np = dev->of_node; | 1614 | struct device_node *np = dev->of_node; |
| 1617 | struct device_node *endpoint, *device_node; | 1615 | struct device_node *endpoint, *device_node; |
| 1618 | int ret; | 1616 | int ret = 0; |
| 1619 | 1617 | ||
| 1620 | /* | 1618 | /* |
| 1621 | * Get the endpoint of the output port of the DSI host. In our case, | 1619 | * Get the endpoint of the output port of the DSI host. In our case, |
| @@ -1639,8 +1637,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host) | |||
| 1639 | /* Get panel node from the output port's endpoint data */ | 1637 | /* Get panel node from the output port's endpoint data */ |
| 1640 | device_node = of_graph_get_remote_port_parent(endpoint); | 1638 | device_node = of_graph_get_remote_port_parent(endpoint); |
| 1641 | if (!device_node) { | 1639 | if (!device_node) { |
| 1642 | dev_err(dev, "%s: no valid device\n", __func__); | 1640 | dev_dbg(dev, "%s: no valid device\n", __func__); |
| 1643 | ret = -ENODEV; | ||
| 1644 | goto err; | 1641 | goto err; |
| 1645 | } | 1642 | } |
| 1646 | 1643 | ||
| @@ -2119,6 +2116,28 @@ exit: | |||
| 2119 | return ret; | 2116 | return ret; |
| 2120 | } | 2117 | } |
| 2121 | 2118 | ||
| 2119 | void msm_dsi_host_reset_phy(struct mipi_dsi_host *host) | ||
| 2120 | { | ||
| 2121 | struct msm_dsi_host *msm_host = to_msm_dsi_host(host); | ||
| 2122 | |||
| 2123 | DBG(""); | ||
| 2124 | dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET); | ||
| 2125 | /* Make sure fully reset */ | ||
| 2126 | wmb(); | ||
| 2127 | udelay(1000); | ||
| 2128 | dsi_write(msm_host, REG_DSI_PHY_RESET, 0); | ||
| 2129 | udelay(100); | ||
| 2130 | } | ||
| 2131 | |||
| 2132 | void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host, | ||
| 2133 | struct msm_dsi_phy_clk_request *clk_req) | ||
| 2134 | { | ||
| 2135 | struct msm_dsi_host *msm_host = to_msm_dsi_host(host); | ||
| 2136 | |||
| 2137 | clk_req->bitclk_rate = msm_host->byte_clk_rate * 8; | ||
| 2138 | clk_req->escclk_rate = msm_host->esc_clk_rate; | ||
| 2139 | } | ||
| 2140 | |||
| 2122 | int msm_dsi_host_enable(struct mipi_dsi_host *host) | 2141 | int msm_dsi_host_enable(struct mipi_dsi_host *host) |
| 2123 | { | 2142 | { |
| 2124 | struct msm_dsi_host *msm_host = to_msm_dsi_host(host); | 2143 | struct msm_dsi_host *msm_host = to_msm_dsi_host(host); |
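The request handed to the PHY is derived from rates the host has already computed: one byte clock period spans eight bit times per lane, hence bitclk_rate = byte_clk_rate * 8. A caller sketch with a hypothetical rate:

	struct msm_dsi_phy_clk_request clk_req;

	msm_dsi_host_get_phy_clk_req(host, &clk_req);
	/* with byte_clk_rate = 111375000 Hz (hypothetical), this yields
	 * clk_req.bitclk_rate = 111375000 * 8 = 891000000 Hz per lane */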
| @@ -2166,10 +2185,10 @@ static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable) | |||
| 2166 | SFPB_GPREG_MASTER_PORT_EN(en)); | 2185 | SFPB_GPREG_MASTER_PORT_EN(en)); |
| 2167 | } | 2186 | } |
| 2168 | 2187 | ||
| 2169 | int msm_dsi_host_power_on(struct mipi_dsi_host *host) | 2188 | int msm_dsi_host_power_on(struct mipi_dsi_host *host, |
| 2189 | struct msm_dsi_phy_shared_timings *phy_shared_timings) | ||
| 2170 | { | 2190 | { |
| 2171 | struct msm_dsi_host *msm_host = to_msm_dsi_host(host); | 2191 | struct msm_dsi_host *msm_host = to_msm_dsi_host(host); |
| 2172 | u32 clk_pre = 0, clk_post = 0; | ||
| 2173 | int ret = 0; | 2192 | int ret = 0; |
| 2174 | 2193 | ||
| 2175 | mutex_lock(&msm_host->dev_mutex); | 2194 | mutex_lock(&msm_host->dev_mutex); |
| @@ -2180,12 +2199,6 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host) | |||
| 2180 | 2199 | ||
| 2181 | msm_dsi_sfpb_config(msm_host, true); | 2200 | msm_dsi_sfpb_config(msm_host, true); |
| 2182 | 2201 | ||
| 2183 | ret = dsi_calc_clk_rate(msm_host); | ||
| 2184 | if (ret) { | ||
| 2185 | pr_err("%s: unable to calc clk rate, %d\n", __func__, ret); | ||
| 2186 | goto unlock_ret; | ||
| 2187 | } | ||
| 2188 | |||
| 2189 | ret = dsi_host_regulator_enable(msm_host); | 2202 | ret = dsi_host_regulator_enable(msm_host); |
| 2190 | if (ret) { | 2203 | if (ret) { |
| 2191 | pr_err("%s:Failed to enable vregs.ret=%d\n", | 2204 | pr_err("%s:Failed to enable vregs.ret=%d\n", |
| @@ -2193,23 +2206,6 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host) | |||
| 2193 | goto unlock_ret; | 2206 | goto unlock_ret; |
| 2194 | } | 2207 | } |
| 2195 | 2208 | ||
| 2196 | ret = dsi_bus_clk_enable(msm_host); | ||
| 2197 | if (ret) { | ||
| 2198 | pr_err("%s: failed to enable bus clocks, %d\n", __func__, ret); | ||
| 2199 | goto fail_disable_reg; | ||
| 2200 | } | ||
| 2201 | |||
| 2202 | dsi_phy_sw_reset(msm_host); | ||
| 2203 | ret = msm_dsi_manager_phy_enable(msm_host->id, | ||
| 2204 | msm_host->byte_clk_rate * 8, | ||
| 2205 | msm_host->esc_clk_rate, | ||
| 2206 | &clk_pre, &clk_post); | ||
| 2207 | dsi_bus_clk_disable(msm_host); | ||
| 2208 | if (ret) { | ||
| 2209 | pr_err("%s: failed to enable phy, %d\n", __func__, ret); | ||
| 2210 | goto fail_disable_reg; | ||
| 2211 | } | ||
| 2212 | |||
| 2213 | ret = dsi_clk_ctrl(msm_host, 1); | 2209 | ret = dsi_clk_ctrl(msm_host, 1); |
| 2214 | if (ret) { | 2210 | if (ret) { |
| 2215 | pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret); | 2211 | pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret); |
| @@ -2225,7 +2221,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host) | |||
| 2225 | 2221 | ||
| 2226 | dsi_timing_setup(msm_host); | 2222 | dsi_timing_setup(msm_host); |
| 2227 | dsi_sw_reset(msm_host); | 2223 | dsi_sw_reset(msm_host); |
| 2228 | dsi_ctrl_config(msm_host, true, clk_pre, clk_post); | 2224 | dsi_ctrl_config(msm_host, true, phy_shared_timings); |
| 2229 | 2225 | ||
| 2230 | if (msm_host->disp_en_gpio) | 2226 | if (msm_host->disp_en_gpio) |
| 2231 | gpiod_set_value(msm_host->disp_en_gpio, 1); | 2227 | gpiod_set_value(msm_host->disp_en_gpio, 1); |
| @@ -2254,15 +2250,13 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host) | |||
| 2254 | goto unlock_ret; | 2250 | goto unlock_ret; |
| 2255 | } | 2251 | } |
| 2256 | 2252 | ||
| 2257 | dsi_ctrl_config(msm_host, false, 0, 0); | 2253 | dsi_ctrl_config(msm_host, false, NULL); |
| 2258 | 2254 | ||
| 2259 | if (msm_host->disp_en_gpio) | 2255 | if (msm_host->disp_en_gpio) |
| 2260 | gpiod_set_value(msm_host->disp_en_gpio, 0); | 2256 | gpiod_set_value(msm_host->disp_en_gpio, 0); |
| 2261 | 2257 | ||
| 2262 | pinctrl_pm_select_sleep_state(&msm_host->pdev->dev); | 2258 | pinctrl_pm_select_sleep_state(&msm_host->pdev->dev); |
| 2263 | 2259 | ||
| 2264 | msm_dsi_manager_phy_disable(msm_host->id); | ||
| 2265 | |||
| 2266 | dsi_clk_ctrl(msm_host, 0); | 2260 | dsi_clk_ctrl(msm_host, 0); |
| 2267 | 2261 | ||
| 2268 | dsi_host_regulator_disable(msm_host); | 2262 | dsi_host_regulator_disable(msm_host); |
| @@ -2282,6 +2276,7 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, | |||
| 2282 | struct drm_display_mode *mode) | 2276 | struct drm_display_mode *mode) |
| 2283 | { | 2277 | { |
| 2284 | struct msm_dsi_host *msm_host = to_msm_dsi_host(host); | 2278 | struct msm_dsi_host *msm_host = to_msm_dsi_host(host); |
| 2279 | int ret; | ||
| 2285 | 2280 | ||
| 2286 | if (msm_host->mode) { | 2281 | if (msm_host->mode) { |
| 2287 | drm_mode_destroy(msm_host->dev, msm_host->mode); | 2282 | drm_mode_destroy(msm_host->dev, msm_host->mode); |
| @@ -2294,6 +2289,12 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, | |||
| 2294 | return -ENOMEM; | 2289 | return -ENOMEM; |
| 2295 | } | 2290 | } |
| 2296 | 2291 | ||
| 2292 | ret = dsi_calc_clk_rate(msm_host); | ||
| 2293 | if (ret) { | ||
| 2294 | pr_err("%s: unable to calc clk rate, %d\n", __func__, ret); | ||
| 2295 | return ret; | ||
| 2296 | } | ||
| 2297 | |||
| 2297 | return 0; | 2298 | return 0; |
| 2298 | } | 2299 | } |
| 2299 | 2300 | ||
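Moving dsi_calc_clk_rate() here means the link rates are fixed as soon as the display mode is set, before power-on. Its internals are not shown in this hunk, but assuming the usual D-PHY relation byte_clk = pclk * bpp / (8 * lanes), a common 1080p case works out as (hypothetical numbers):

	/* pclk  = 148500000 Hz  (1920x1080-60)
	 * bpp   = 24 (RGB888), lanes = 4
	 * byte_clk = 148500000 * 24 / (8 * 4) = 111375000 Hz
	 * bitclk   = byte_clk * 8            = 891000000 Hz per lane */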
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 2bd8dad76105..921270ea6059 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c | |||
| @@ -72,11 +72,12 @@ static int dsi_mgr_parse_dual_dsi(struct device_node *np, int id) | |||
| 72 | return 0; | 72 | return 0; |
| 73 | } | 73 | } |
| 74 | 74 | ||
| 75 | static int dsi_mgr_host_register(int id) | 75 | static int dsi_mgr_setup_components(int id) |
| 76 | { | 76 | { |
| 77 | struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); | 77 | struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); |
| 78 | struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id); | 78 | struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id); |
| 79 | struct msm_dsi *clk_master_dsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER); | 79 | struct msm_dsi *clk_master_dsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER); |
| 80 | struct msm_dsi *clk_slave_dsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); | ||
| 80 | struct msm_dsi_pll *src_pll; | 81 | struct msm_dsi_pll *src_pll; |
| 81 | int ret; | 82 | int ret; |
| 82 | 83 | ||
| @@ -85,15 +86,16 @@ static int dsi_mgr_host_register(int id) | |||
| 85 | if (ret) | 86 | if (ret) |
| 86 | return ret; | 87 | return ret; |
| 87 | 88 | ||
| 89 | msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE); | ||
| 88 | src_pll = msm_dsi_phy_get_pll(msm_dsi->phy); | 90 | src_pll = msm_dsi_phy_get_pll(msm_dsi->phy); |
| 89 | ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll); | 91 | ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll); |
| 90 | } else if (!other_dsi) { | 92 | } else if (!other_dsi) { |
| 91 | ret = 0; | 93 | ret = 0; |
| 92 | } else { | 94 | } else { |
| 93 | struct msm_dsi *mdsi = IS_MASTER_DSI_LINK(id) ? | 95 | struct msm_dsi *master_link_dsi = IS_MASTER_DSI_LINK(id) ? |
| 94 | msm_dsi : other_dsi; | 96 | msm_dsi : other_dsi; |
| 95 | struct msm_dsi *sdsi = IS_MASTER_DSI_LINK(id) ? | 97 | struct msm_dsi *slave_link_dsi = IS_MASTER_DSI_LINK(id) ? |
| 96 | other_dsi : msm_dsi; | 98 | other_dsi : msm_dsi; |
| 97 | /* Register slave host first, so that slave DSI device | 99 | /* Register slave host first, so that slave DSI device |
| 98 | * has a chance to probe, and do not block the master | 100 | * has a chance to probe, and do not block the master |
| 99 | * DSI device's probe. | 101 | * DSI device's probe. |
| @@ -101,14 +103,18 @@ static int dsi_mgr_host_register(int id) | |||
| 101 | * because only master DSI device adds the panel to global | 103 | * because only master DSI device adds the panel to global |
| 102 | * panel list. The panel's device is the master DSI device. | 104 | * panel list. The panel's device is the master DSI device. |
| 103 | */ | 105 | */ |
| 104 | ret = msm_dsi_host_register(sdsi->host, false); | 106 | ret = msm_dsi_host_register(slave_link_dsi->host, false); |
| 105 | if (ret) | 107 | if (ret) |
| 106 | return ret; | 108 | return ret; |
| 107 | ret = msm_dsi_host_register(mdsi->host, true); | 109 | ret = msm_dsi_host_register(master_link_dsi->host, true); |
| 108 | if (ret) | 110 | if (ret) |
| 109 | return ret; | 111 | return ret; |
| 110 | 112 | ||
| 111 | /* PLL0 is to drive both 2 DSI link clocks in Dual DSI mode. */ | 113 | /* PLL0 is to drive both 2 DSI link clocks in Dual DSI mode. */ |
| 114 | msm_dsi_phy_set_usecase(clk_master_dsi->phy, | ||
| 115 | MSM_DSI_PHY_MASTER); | ||
| 116 | msm_dsi_phy_set_usecase(clk_slave_dsi->phy, | ||
| 117 | MSM_DSI_PHY_SLAVE); | ||
| 112 | src_pll = msm_dsi_phy_get_pll(clk_master_dsi->phy); | 118 | src_pll = msm_dsi_phy_get_pll(clk_master_dsi->phy); |
| 113 | ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll); | 119 | ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll); |
| 114 | if (ret) | 120 | if (ret) |
| @@ -119,6 +125,84 @@ static int dsi_mgr_host_register(int id) | |||
| 119 | return ret; | 125 | return ret; |
| 120 | } | 126 | } |
| 121 | 127 | ||
| 128 | static int enable_phy(struct msm_dsi *msm_dsi, int src_pll_id, | ||
| 129 | struct msm_dsi_phy_shared_timings *shared_timings) | ||
| 130 | { | ||
| 131 | struct msm_dsi_phy_clk_request clk_req; | ||
| 132 | int ret; | ||
| 133 | |||
| 134 | msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req); | ||
| 135 | |||
| 136 | ret = msm_dsi_phy_enable(msm_dsi->phy, src_pll_id, &clk_req); | ||
| 137 | msm_dsi_phy_get_shared_timings(msm_dsi->phy, shared_timings); | ||
| 138 | |||
| 139 | return ret; | ||
| 140 | } | ||
| 141 | |||
| 142 | static int | ||
| 143 | dsi_mgr_phy_enable(int id, | ||
| 144 | struct msm_dsi_phy_shared_timings shared_timings[DSI_MAX]) | ||
| 145 | { | ||
| 146 | struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); | ||
| 147 | struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER); | ||
| 148 | struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); | ||
| 149 | int src_pll_id = IS_DUAL_DSI() ? DSI_CLOCK_MASTER : id; | ||
| 150 | int ret; | ||
| 151 | |||
| 152 | /* In the dual DSI case, some PHY1 registers have already been | ||
| 153 | * programmed during PLL0's set_rate. The PHY1 reset issued by host1 | ||
| 154 | * here would silently clobber them, so we need to reset and enable | ||
| 155 | * both PHYs before any PLL clock operation. | ||
| 156 | */ | ||
| 157 | if (IS_DUAL_DSI() && mdsi && sdsi) { | ||
| 158 | if (!mdsi->phy_enabled && !sdsi->phy_enabled) { | ||
| 159 | msm_dsi_host_reset_phy(mdsi->host); | ||
| 160 | msm_dsi_host_reset_phy(sdsi->host); | ||
| 161 | |||
| 162 | ret = enable_phy(mdsi, src_pll_id, | ||
| 163 | &shared_timings[DSI_CLOCK_MASTER]); | ||
| 164 | if (ret) | ||
| 165 | return ret; | ||
| 166 | ret = enable_phy(sdsi, src_pll_id, | ||
| 167 | &shared_timings[DSI_CLOCK_SLAVE]); | ||
| 168 | if (ret) { | ||
| 169 | msm_dsi_phy_disable(mdsi->phy); | ||
| 170 | return ret; | ||
| 171 | } | ||
| 172 | } | ||
| 173 | } else { | ||
| 174 | msm_dsi_host_reset_phy(msm_dsi->host); | ||
| 175 | ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]); | ||
| 176 | if (ret) | ||
| 177 | return ret; | ||
| 178 | } | ||
| 179 | |||
| 180 | msm_dsi->phy_enabled = true; | ||
| 181 | |||
| 182 | return 0; | ||
| 183 | } | ||
| 184 | |||
| 185 | static void dsi_mgr_phy_disable(int id) | ||
| 186 | { | ||
| 187 | struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); | ||
| 188 | struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER); | ||
| 189 | struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); | ||
| 190 | |||
| 191 | /* disable DSI PHY | ||
| 192 | * In a dual-DSI configuration, the PHYs are powered down only | ||
| 193 | * once both controllers have requested disable. | ||
| 194 | */ | ||
| 195 | msm_dsi->phy_enabled = false; | ||
| 196 | if (IS_DUAL_DSI() && mdsi && sdsi) { | ||
| 197 | if (!mdsi->phy_enabled && !sdsi->phy_enabled) { | ||
| 198 | msm_dsi_phy_disable(sdsi->phy); | ||
| 199 | msm_dsi_phy_disable(mdsi->phy); | ||
| 200 | } | ||
| 201 | } else { | ||
| 202 | msm_dsi_phy_disable(msm_dsi->phy); | ||
| 203 | } | ||
| 204 | } | ||
| 205 | |||
| 122 | struct dsi_connector { | 206 | struct dsi_connector { |
| 123 | struct drm_connector base; | 207 | struct drm_connector base; |
| 124 | int id; | 208 | int id; |
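Taken together, dsi_mgr_phy_enable()/dsi_mgr_phy_disable() give the PHY pair refcount-like behavior via the per-controller phy_enabled flags. A hypothetical dual-DSI call trace:

	/* pre_enable(DSI_0):  neither flag set -> reset PHY0+PHY1, enable both
	 * pre_enable(DSI_1):  PHYs already up  -> only flags itself enabled
	 * ...
	 * post_disable(DSI_0): clears its flag; PHY1 still marked, no power-down
	 * post_disable(DSI_1): both flags clear -> PHY1 then PHY0 powered down */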
| @@ -168,6 +252,16 @@ static enum drm_connector_status dsi_mgr_connector_detect( | |||
| 168 | msm_dsi->panel = msm_dsi_host_get_panel( | 252 | msm_dsi->panel = msm_dsi_host_get_panel( |
| 169 | other_dsi->host, NULL); | 253 | other_dsi->host, NULL); |
| 170 | 254 | ||
| 255 | |||
| 256 | if (msm_dsi->panel && kms->funcs->set_encoder_mode) { | ||
| 257 | bool cmd_mode = !(msm_dsi->device_flags & | ||
| 258 | MIPI_DSI_MODE_VIDEO); | ||
| 259 | struct drm_encoder *encoder = | ||
| 260 | msm_dsi_get_encoder(msm_dsi); | ||
| 261 | |||
| 262 | kms->funcs->set_encoder_mode(kms, encoder, cmd_mode); | ||
| 263 | } | ||
| 264 | |||
| 171 | if (msm_dsi->panel && IS_DUAL_DSI()) | 265 | if (msm_dsi->panel && IS_DUAL_DSI()) |
| 172 | drm_object_attach_property(&connector->base, | 266 | drm_object_attach_property(&connector->base, |
| 173 | connector->dev->mode_config.tile_property, 0); | 267 | connector->dev->mode_config.tile_property, 0); |
| @@ -344,22 +438,31 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge) | |||
| 344 | struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1); | 438 | struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1); |
| 345 | struct mipi_dsi_host *host = msm_dsi->host; | 439 | struct mipi_dsi_host *host = msm_dsi->host; |
| 346 | struct drm_panel *panel = msm_dsi->panel; | 440 | struct drm_panel *panel = msm_dsi->panel; |
| 441 | struct msm_dsi_phy_shared_timings phy_shared_timings[DSI_MAX]; | ||
| 347 | bool is_dual_dsi = IS_DUAL_DSI(); | 442 | bool is_dual_dsi = IS_DUAL_DSI(); |
| 348 | int ret; | 443 | int ret; |
| 349 | 444 | ||
| 350 | DBG("id=%d", id); | 445 | DBG("id=%d", id); |
| 351 | if (!msm_dsi_device_connected(msm_dsi) || | 446 | if (!msm_dsi_device_connected(msm_dsi)) |
| 352 | (is_dual_dsi && (DSI_1 == id))) | ||
| 353 | return; | 447 | return; |
| 354 | 448 | ||
| 355 | ret = msm_dsi_host_power_on(host); | 449 | ret = dsi_mgr_phy_enable(id, phy_shared_timings); |
| 450 | if (ret) | ||
| 451 | goto phy_en_fail; | ||
| 452 | |||
| 453 | /* Do nothing with the host if it is DSI 1 in case of dual DSI */ | ||
| 454 | if (is_dual_dsi && (DSI_1 == id)) | ||
| 455 | return; | ||
| 456 | |||
| 457 | ret = msm_dsi_host_power_on(host, &phy_shared_timings[id]); | ||
| 356 | if (ret) { | 458 | if (ret) { |
| 357 | pr_err("%s: power on host %d failed, %d\n", __func__, id, ret); | 459 | pr_err("%s: power on host %d failed, %d\n", __func__, id, ret); |
| 358 | goto host_on_fail; | 460 | goto host_on_fail; |
| 359 | } | 461 | } |
| 360 | 462 | ||
| 361 | if (is_dual_dsi && msm_dsi1) { | 463 | if (is_dual_dsi && msm_dsi1) { |
| 362 | ret = msm_dsi_host_power_on(msm_dsi1->host); | 464 | ret = msm_dsi_host_power_on(msm_dsi1->host, |
| 465 | &phy_shared_timings[DSI_1]); | ||
| 363 | if (ret) { | 466 | if (ret) { |
| 364 | pr_err("%s: power on host1 failed, %d\n", | 467 | pr_err("%s: power on host1 failed, %d\n", |
| 365 | __func__, ret); | 468 | __func__, ret); |
| @@ -418,6 +521,8 @@ panel_prep_fail: | |||
| 418 | host1_on_fail: | 521 | host1_on_fail: |
| 419 | msm_dsi_host_power_off(host); | 522 | msm_dsi_host_power_off(host); |
| 420 | host_on_fail: | 523 | host_on_fail: |
| 524 | dsi_mgr_phy_disable(id); | ||
| 525 | phy_en_fail: | ||
| 421 | return; | 526 | return; |
| 422 | } | 527 | } |
| 423 | 528 | ||
| @@ -443,10 +548,17 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge) | |||
| 443 | 548 | ||
| 444 | DBG("id=%d", id); | 549 | DBG("id=%d", id); |
| 445 | 550 | ||
| 446 | if (!msm_dsi_device_connected(msm_dsi) || | 551 | if (!msm_dsi_device_connected(msm_dsi)) |
| 447 | (is_dual_dsi && (DSI_1 == id))) | ||
| 448 | return; | 552 | return; |
| 449 | 553 | ||
| 554 | /* | ||
| 555 | * Do nothing with the host if it is DSI 1 in case of dual DSI. | ||
| 556 | * It is safe to call dsi_mgr_phy_disable() here because a single PHY | ||
| 557 | * won't be disabled until both PHYs request disable. | ||
| 558 | */ | ||
| 559 | if (is_dual_dsi && (DSI_1 == id)) | ||
| 560 | goto disable_phy; | ||
| 561 | |||
| 450 | if (panel) { | 562 | if (panel) { |
| 451 | ret = drm_panel_disable(panel); | 563 | ret = drm_panel_disable(panel); |
| 452 | if (ret) | 564 | if (ret) |
| @@ -481,6 +593,9 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge) | |||
| 481 | pr_err("%s: host1 power off failed, %d\n", | 593 | pr_err("%s: host1 power off failed, %d\n", |
| 482 | __func__, ret); | 594 | __func__, ret); |
| 483 | } | 595 | } |
| 596 | |||
| 597 | disable_phy: | ||
| 598 | dsi_mgr_phy_disable(id); | ||
| 484 | } | 599 | } |
| 485 | 600 | ||
| 486 | static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge, | 601 | static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge, |
| @@ -540,7 +655,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id) | |||
| 540 | struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); | 655 | struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); |
| 541 | struct drm_connector *connector = NULL; | 656 | struct drm_connector *connector = NULL; |
| 542 | struct dsi_connector *dsi_connector; | 657 | struct dsi_connector *dsi_connector; |
| 543 | int ret, i; | 658 | int ret; |
| 544 | 659 | ||
| 545 | dsi_connector = kzalloc(sizeof(*dsi_connector), GFP_KERNEL); | 660 | dsi_connector = kzalloc(sizeof(*dsi_connector), GFP_KERNEL); |
| 546 | if (!dsi_connector) | 661 | if (!dsi_connector) |
| @@ -566,9 +681,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id) | |||
| 566 | connector->interlace_allowed = 0; | 681 | connector->interlace_allowed = 0; |
| 567 | connector->doublescan_allowed = 0; | 682 | connector->doublescan_allowed = 0; |
| 568 | 683 | ||
| 569 | for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) | 684 | drm_mode_connector_attach_encoder(connector, msm_dsi->encoder); |
| 570 | drm_mode_connector_attach_encoder(connector, | ||
| 571 | msm_dsi->encoders[i]); | ||
| 572 | 685 | ||
| 573 | return connector; | 686 | return connector; |
| 574 | } | 687 | } |
| @@ -591,13 +704,7 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id) | |||
| 591 | 704 | ||
| 592 | dsi_bridge->id = id; | 705 | dsi_bridge->id = id; |
| 593 | 706 | ||
| 594 | /* | 707 | encoder = msm_dsi->encoder; |
| 595 | * HACK: we may not know the external DSI bridge device's mode | ||
| 596 | * flags here. We'll get to know them only when the device | ||
| 597 | * attaches to the dsi host. For now, assume the bridge supports | ||
| 598 | * DSI video mode | ||
| 599 | */ | ||
| 600 | encoder = msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID]; | ||
| 601 | 708 | ||
| 602 | bridge = &dsi_bridge->base; | 709 | bridge = &dsi_bridge->base; |
| 603 | bridge->funcs = &dsi_mgr_bridge_funcs; | 710 | bridge->funcs = &dsi_mgr_bridge_funcs; |
| @@ -628,13 +735,7 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id) | |||
| 628 | ext_bridge = msm_dsi->external_bridge = | 735 | ext_bridge = msm_dsi->external_bridge = |
| 629 | msm_dsi_host_get_bridge(msm_dsi->host); | 736 | msm_dsi_host_get_bridge(msm_dsi->host); |
| 630 | 737 | ||
| 631 | /* | 738 | encoder = msm_dsi->encoder; |
| 632 | * HACK: we may not know the external DSI bridge device's mode | ||
| 633 | * flags here. We'll get to know them only when the device | ||
| 634 | * attaches to the dsi host. For now, assume the bridge supports | ||
| 635 | * DSI video mode | ||
| 636 | */ | ||
| 637 | encoder = msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID]; | ||
| 638 | 739 | ||
| 639 | /* link the internal dsi bridge to the external bridge */ | 740 | /* link the internal dsi bridge to the external bridge */ |
| 640 | drm_bridge_attach(encoder, ext_bridge, int_bridge); | 741 | drm_bridge_attach(encoder, ext_bridge, int_bridge); |
| @@ -662,68 +763,6 @@ void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge) | |||
| 662 | { | 763 | { |
| 663 | } | 764 | } |
| 664 | 765 | ||
| 665 | int msm_dsi_manager_phy_enable(int id, | ||
| 666 | const unsigned long bit_rate, const unsigned long esc_rate, | ||
| 667 | u32 *clk_pre, u32 *clk_post) | ||
| 668 | { | ||
| 669 | struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); | ||
| 670 | struct msm_dsi_phy *phy = msm_dsi->phy; | ||
| 671 | int src_pll_id = IS_DUAL_DSI() ? DSI_CLOCK_MASTER : id; | ||
| 672 | struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy); | ||
| 673 | int ret; | ||
| 674 | |||
| 675 | ret = msm_dsi_phy_enable(phy, src_pll_id, bit_rate, esc_rate); | ||
| 676 | if (ret) | ||
| 677 | return ret; | ||
| 678 | |||
| 679 | /* | ||
| 680 | * Reset DSI PHY silently changes its PLL registers to reset status, | ||
| 681 | * which will confuse clock driver and result in wrong output rate of | ||
| 682 | * link clocks. Restore PLL status if its PLL is being used as clock | ||
| 683 | * source. | ||
| 684 | */ | ||
| 685 | if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER)) { | ||
| 686 | ret = msm_dsi_pll_restore_state(pll); | ||
| 687 | if (ret) { | ||
| 688 | pr_err("%s: failed to restore pll state\n", __func__); | ||
| 689 | msm_dsi_phy_disable(phy); | ||
| 690 | return ret; | ||
| 691 | } | ||
| 692 | } | ||
| 693 | |||
| 694 | msm_dsi->phy_enabled = true; | ||
| 695 | msm_dsi_phy_get_clk_pre_post(phy, clk_pre, clk_post); | ||
| 696 | |||
| 697 | return 0; | ||
| 698 | } | ||
| 699 | |||
| 700 | void msm_dsi_manager_phy_disable(int id) | ||
| 701 | { | ||
| 702 | struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); | ||
| 703 | struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER); | ||
| 704 | struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); | ||
| 705 | struct msm_dsi_phy *phy = msm_dsi->phy; | ||
| 706 | struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy); | ||
| 707 | |||
| 708 | /* Save PLL status if it is a clock source */ | ||
| 709 | if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER)) | ||
| 710 | msm_dsi_pll_save_state(pll); | ||
| 711 | |||
| 712 | /* disable DSI phy | ||
| 713 | * In dual-dsi configuration, the phy should be disabled for the | ||
| 714 | * first controller only when the second controller is disabled. | ||
| 715 | */ | ||
| 716 | msm_dsi->phy_enabled = false; | ||
| 717 | if (IS_DUAL_DSI() && mdsi && sdsi) { | ||
| 718 | if (!mdsi->phy_enabled && !sdsi->phy_enabled) { | ||
| 719 | msm_dsi_phy_disable(sdsi->phy); | ||
| 720 | msm_dsi_phy_disable(mdsi->phy); | ||
| 721 | } | ||
| 722 | } else { | ||
| 723 | msm_dsi_phy_disable(phy); | ||
| 724 | } | ||
| 725 | } | ||
| 726 | |||
| 727 | int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg) | 766 | int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg) |
| 728 | { | 767 | { |
| 729 | struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); | 768 | struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); |
| @@ -787,6 +826,33 @@ bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len) | |||
| 787 | return true; | 826 | return true; |
| 788 | } | 827 | } |
| 789 | 828 | ||
| 829 | void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags) | ||
| 830 | { | ||
| 831 | struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); | ||
| 832 | struct drm_device *dev = msm_dsi->dev; | ||
| 833 | struct msm_drm_private *priv; | ||
| 834 | struct msm_kms *kms; | ||
| 835 | struct drm_encoder *encoder; | ||
| 836 | |||
| 837 | /* | ||
| 838 | * The drm_device pointer is assigned to msm_dsi only in the modeset_init | ||
| 839 | * path. If mipi_dsi_attach() happens in the DSI driver's probe path | ||
| 840 | * (generally the case when we're connected to a drm_panel of the type | ||
| 841 | * mipi_dsi_device), it is still NULL. In that case, the encoder mode is | ||
| 842 | * set later, in the DSI connector's detect() op. | ||
| 843 | */ | ||
| 844 | if (!dev) | ||
| 845 | return; | ||
| 846 | |||
| 847 | priv = dev->dev_private; | ||
| 848 | kms = priv->kms; | ||
| 849 | encoder = msm_dsi_get_encoder(msm_dsi); | ||
| 850 | |||
| 851 | if (encoder && kms->funcs->set_encoder_mode) | ||
| 852 | if (!(device_flags & MIPI_DSI_MODE_VIDEO)) | ||
| 853 | kms->funcs->set_encoder_mode(kms, encoder, true); | ||
| 854 | } | ||
| 855 | |||
| 790 | int msm_dsi_manager_register(struct msm_dsi *msm_dsi) | 856 | int msm_dsi_manager_register(struct msm_dsi *msm_dsi) |
| 791 | { | 857 | { |
| 792 | struct msm_dsi_manager *msm_dsim = &msm_dsim_glb; | 858 | struct msm_dsi_manager *msm_dsim = &msm_dsim_glb; |
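So the encoder mode ends up being configured along one of two paths, depending on when the DSI device attaches (sketch of the flow; both legs are visible in the hunks above):

	/* 1) modeset_init already ran: dsi_host_attach() ->
	 *    msm_dsi_manager_attach_dsi_device() sets the mode immediately;
	 * 2) attach happened at probe time (dev == NULL above): the
	 *    connector's detect() op sets it once the panel is known */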
| @@ -811,7 +877,7 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi) | |||
| 811 | goto fail; | 877 | goto fail; |
| 812 | } | 878 | } |
| 813 | 879 | ||
| 814 | ret = dsi_mgr_host_register(id); | 880 | ret = dsi_mgr_setup_components(id); |
| 815 | if (ret) { | 881 | if (ret) { |
| 816 | pr_err("%s: failed to register mipi dsi host for DSI %d\n", | 882 | pr_err("%s: failed to register mipi dsi host for DSI %d\n", |
| 817 | __func__, id); | 883 | __func__, id); |
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index f39386ed75e4..0c2eb9c9a1fc 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | |||
| @@ -54,8 +54,10 @@ static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing, | |||
| 54 | } | 54 | } |
| 55 | 55 | ||
| 56 | int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, | 56 | int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, |
| 57 | const unsigned long bit_rate, const unsigned long esc_rate) | 57 | struct msm_dsi_phy_clk_request *clk_req) |
| 58 | { | 58 | { |
| 59 | const unsigned long bit_rate = clk_req->bitclk_rate; | ||
| 60 | const unsigned long esc_rate = clk_req->escclk_rate; | ||
| 59 | s32 ui, lpx; | 61 | s32 ui, lpx; |
| 60 | s32 tmax, tmin; | 62 | s32 tmax, tmin; |
| 61 | s32 pcnt0 = 10; | 63 | s32 pcnt0 = 10; |
| @@ -115,8 +117,8 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, | |||
| 115 | temp = ((timing->hs_exit >> 1) + 1) * 2 * ui; | 117 | temp = ((timing->hs_exit >> 1) + 1) * 2 * ui; |
| 116 | temp = 60 * coeff + 52 * ui - 24 * ui - temp; | 118 | temp = 60 * coeff + 52 * ui - 24 * ui - temp; |
| 117 | tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1; | 119 | tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1; |
| 118 | timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false); | 120 | timing->shared_timings.clk_post = linear_inter(tmax, tmin, pcnt2, 0, |
| 119 | 121 | false); | |
| 120 | tmax = 63; | 122 | tmax = 63; |
| 121 | temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui; | 123 | temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui; |
| 122 | temp += ((timing->clk_zero >> 1) + 1) * 2 * ui; | 124 | temp += ((timing->clk_zero >> 1) + 1) * 2 * ui; |
| @@ -124,17 +126,21 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, | |||
| 124 | tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1; | 126 | tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1; |
| 125 | if (tmin > tmax) { | 127 | if (tmin > tmax) { |
| 126 | temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false); | 128 | temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false); |
| 127 | timing->clk_pre = temp >> 1; | 129 | timing->shared_timings.clk_pre = temp >> 1; |
| 130 | timing->shared_timings.clk_pre_inc_by_2 = true; | ||
| 128 | } else { | 131 | } else { |
| 129 | timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false); | 132 | timing->shared_timings.clk_pre = |
| 133 | linear_inter(tmax, tmin, pcnt2, 0, false); | ||
| 134 | timing->shared_timings.clk_pre_inc_by_2 = false; | ||
| 130 | } | 135 | } |
| 131 | 136 | ||
| 132 | timing->ta_go = 3; | 137 | timing->ta_go = 3; |
| 133 | timing->ta_sure = 0; | 138 | timing->ta_sure = 0; |
| 134 | timing->ta_get = 4; | 139 | timing->ta_get = 4; |
| 135 | 140 | ||
| 136 | DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d", | 141 | DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d", |
| 137 | timing->clk_pre, timing->clk_post, timing->clk_zero, | 142 | timing->shared_timings.clk_pre, timing->shared_timings.clk_post, |
| 143 | timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero, | ||
| 138 | timing->clk_trail, timing->clk_prepare, timing->hs_exit, | 144 | timing->clk_trail, timing->clk_prepare, timing->hs_exit, |
| 139 | timing->hs_zero, timing->hs_prepare, timing->hs_trail, | 145 | timing->hs_zero, timing->hs_prepare, timing->hs_trail, |
| 140 | timing->hs_rqst); | 146 | timing->hs_rqst); |
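When tmin exceeds tmax here, the valid clk_pre window cannot be represented in the register field, so the code interpolates inside a doubled window, halves the result, and records the halving in clk_pre_inc_by_2 for the host's T_CLK_PRE_EXTEND write. Assuming linear_inter(tmax, tmin, pcnt, ...) picks a point pcnt percent of the way from tmin toward tmax, a hypothetical case:

	/* tmin = 70, tmax = 63 -> overflow path:
	 * linear_inter(2 * 63, 70, 10, 0, false) = 70 + (126 - 70) * 10 / 100
	 *                                        = 75 (approximately)
	 * clk_pre = 75 >> 1 = 37, clk_pre_inc_by_2 = true */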
| @@ -142,6 +148,123 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, | |||
| 142 | return 0; | 148 | return 0; |
| 143 | } | 149 | } |
| 144 | 150 | ||
| 151 | int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing, | ||
| 152 | struct msm_dsi_phy_clk_request *clk_req) | ||
| 153 | { | ||
| 154 | const unsigned long bit_rate = clk_req->bitclk_rate; | ||
| 155 | const unsigned long esc_rate = clk_req->escclk_rate; | ||
| 156 | s32 ui, ui_x8, lpx; | ||
| 157 | s32 tmax, tmin; | ||
| 158 | s32 pcnt0 = 50; | ||
| 159 | s32 pcnt1 = 50; | ||
| 160 | s32 pcnt2 = 10; | ||
| 161 | s32 pcnt3 = 30; | ||
| 162 | s32 pcnt4 = 10; | ||
| 163 | s32 pcnt5 = 2; | ||
| 164 | s32 coeff = 1000; /* Precision, should avoid overflow */ | ||
| 165 | s32 hb_en, hb_en_ckln, pd_ckln, pd; | ||
| 166 | s32 val, val_ckln; | ||
| 167 | s32 temp; | ||
| 168 | |||
| 169 | if (!bit_rate || !esc_rate) | ||
| 170 | return -EINVAL; | ||
| 171 | |||
| 172 | timing->hs_halfbyte_en = 0; | ||
| 173 | hb_en = 0; | ||
| 174 | timing->hs_halfbyte_en_ckln = 0; | ||
| 175 | hb_en_ckln = 0; | ||
| 176 | timing->hs_prep_dly_ckln = (bit_rate > 100000000) ? 0 : 3; | ||
| 177 | pd_ckln = timing->hs_prep_dly_ckln; | ||
| 178 | timing->hs_prep_dly = (bit_rate > 120000000) ? 0 : 1; | ||
| 179 | pd = timing->hs_prep_dly; | ||
| 180 | |||
| 181 | val = (hb_en << 2) + (pd << 1); | ||
| 182 | val_ckln = (hb_en_ckln << 2) + (pd_ckln << 1); | ||
| 183 | |||
| 184 | ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000); | ||
| 185 | ui_x8 = ui << 3; | ||
| 186 | lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000); | ||
| 187 | |||
| 188 | temp = S_DIV_ROUND_UP(38 * coeff - val_ckln * ui, ui_x8); | ||
| 189 | tmin = max_t(s32, temp, 0); | ||
| 190 | temp = (95 * coeff - val_ckln * ui) / ui_x8; | ||
| 191 | tmax = max_t(s32, temp, 0); | ||
| 192 | timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false); | ||
| 193 | |||
| 194 | temp = 300 * coeff - ((timing->clk_prepare << 3) + val_ckln) * ui; | ||
| 195 | tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3; | ||
| 196 | tmax = (tmin > 255) ? 511 : 255; | ||
| 197 | timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false); | ||
| 198 | |||
| 199 | tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8); | ||
| 200 | temp = 105 * coeff + 12 * ui - 20 * coeff; | ||
| 201 | tmax = (temp + 3 * ui) / ui_x8; | ||
| 202 | timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false); | ||
| 203 | |||
| 204 | temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui - val * ui, ui_x8); | ||
| 205 | tmin = max_t(s32, temp, 0); | ||
| 206 | temp = (85 * coeff + 6 * ui - val * ui) / ui_x8; | ||
| 207 | tmax = max_t(s32, temp, 0); | ||
| 208 | timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false); | ||
| 209 | |||
| 210 | temp = 145 * coeff + 10 * ui - ((timing->hs_prepare << 3) + val) * ui; | ||
| 211 | tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3; | ||
| 212 | tmax = 255; | ||
| 213 | timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false); | ||
| 214 | |||
| 215 | tmin = DIV_ROUND_UP(60 * coeff + 4 * ui + 3 * ui, ui_x8); | ||
| 216 | temp = 105 * coeff + 12 * ui - 20 * coeff; | ||
| 217 | tmax = (temp + 3 * ui) / ui_x8; | ||
| 218 | timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false); | ||
| 219 | |||
| 220 | temp = 50 * coeff + ((hb_en << 2) - 8) * ui; | ||
| 221 | timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8); | ||
| 222 | |||
| 223 | tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1; | ||
| 224 | tmax = 255; | ||
| 225 | timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false); | ||
| 226 | |||
| 227 | temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui; | ||
| 228 | timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8); | ||
| 229 | |||
| 230 | temp = 60 * coeff + 52 * ui - 43 * ui; | ||
| 231 | tmin = DIV_ROUND_UP(temp, ui_x8) - 1; | ||
| 232 | tmax = 63; | ||
| 233 | timing->shared_timings.clk_post = | ||
| 234 | linear_inter(tmax, tmin, pcnt2, 0, false); | ||
| 235 | |||
| 236 | temp = 8 * ui + ((timing->clk_prepare << 3) + val_ckln) * ui; | ||
| 237 | temp += (((timing->clk_zero + 3) << 3) + 11 - (pd_ckln << 1)) * ui; | ||
| 238 | temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) : | ||
| 239 | (((timing->hs_rqst_ckln << 3) + 8) * ui); | ||
| 240 | tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1; | ||
| 241 | tmax = 63; | ||
| 242 | if (tmin > tmax) { | ||
| 243 | temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false); | ||
| 244 | timing->shared_timings.clk_pre = temp >> 1; | ||
| 245 | timing->shared_timings.clk_pre_inc_by_2 = 1; | ||
| 246 | } else { | ||
| 247 | timing->shared_timings.clk_pre = | ||
| 248 | linear_inter(tmax, tmin, pcnt2, 0, false); | ||
| 249 | timing->shared_timings.clk_pre_inc_by_2 = 0; | ||
| 250 | } | ||
| 251 | |||
| 252 | timing->ta_go = 3; | ||
| 253 | timing->ta_sure = 0; | ||
| 254 | timing->ta_get = 4; | ||
| 255 | |||
| 256 | DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d", | ||
| 257 | timing->shared_timings.clk_pre, timing->shared_timings.clk_post, | ||
| 258 | timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero, | ||
| 259 | timing->clk_trail, timing->clk_prepare, timing->hs_exit, | ||
| 260 | timing->hs_zero, timing->hs_prepare, timing->hs_trail, | ||
| 261 | timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en, | ||
| 262 | timing->hs_halfbyte_en_ckln, timing->hs_prep_dly, | ||
| 263 | timing->hs_prep_dly_ckln); | ||
| 264 | |||
| 265 | return 0; | ||
| 266 | } | ||
| 267 | |||
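The v2 math keeps three extra decimal digits of precision via coeff = 1000 before dividing back out. Continuing the hypothetical 891 Mbit/s example, the scaled unit interval works out as:

	/* ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000)
	 *    = 1000000 * 1000 / 891000 ~= 1122   (about 1.122 ns)
	 * ui_x8 = 1122 << 3 = 8976, one byte clock in the same fixed point */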
| 145 | void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, | 268 | void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, |
| 146 | u32 bit_mask) | 269 | u32 bit_mask) |
| 147 | { | 270 | { |
| @@ -268,6 +391,10 @@ static const struct of_device_id dsi_phy_dt_match[] = { | |||
| 268 | { .compatible = "qcom,dsi-phy-28nm-8960", | 391 | { .compatible = "qcom,dsi-phy-28nm-8960", |
| 269 | .data = &dsi_phy_28nm_8960_cfgs }, | 392 | .data = &dsi_phy_28nm_8960_cfgs }, |
| 270 | #endif | 393 | #endif |
| 394 | #ifdef CONFIG_DRM_MSM_DSI_14NM_PHY | ||
| 395 | { .compatible = "qcom,dsi-phy-14nm", | ||
| 396 | .data = &dsi_phy_14nm_cfgs }, | ||
| 397 | #endif | ||
| 271 | {} | 398 | {} |
| 272 | }; | 399 | }; |
| 273 | 400 | ||
| @@ -295,6 +422,24 @@ static int dsi_phy_get_id(struct msm_dsi_phy *phy) | |||
| 295 | return -EINVAL; | 422 | return -EINVAL; |
| 296 | } | 423 | } |
| 297 | 424 | ||
| 425 | int msm_dsi_phy_init_common(struct msm_dsi_phy *phy) | ||
| 426 | { | ||
| 427 | struct platform_device *pdev = phy->pdev; | ||
| 428 | int ret = 0; | ||
| 429 | |||
| 430 | phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", | ||
| 431 | "DSI_PHY_REG"); | ||
| 432 | if (IS_ERR(phy->reg_base)) { | ||
| 433 | dev_err(&pdev->dev, "%s: failed to map phy regulator base\n", | ||
| 434 | __func__); | ||
| 435 | ret = -ENOMEM; | ||
| 436 | goto fail; | ||
| 437 | } | ||
| 438 | |||
| 439 | fail: | ||
| 440 | return ret; | ||
| 441 | } | ||
| 442 | |||
| 298 | static int dsi_phy_driver_probe(struct platform_device *pdev) | 443 | static int dsi_phy_driver_probe(struct platform_device *pdev) |
| 299 | { | 444 | { |
| 300 | struct msm_dsi_phy *phy; | 445 | struct msm_dsi_phy *phy; |
| @@ -331,15 +476,6 @@ static int dsi_phy_driver_probe(struct platform_device *pdev) | |||
| 331 | goto fail; | 476 | goto fail; |
| 332 | } | 477 | } |
| 333 | 478 | ||
| 334 | phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", | ||
| 335 | "DSI_PHY_REG"); | ||
| 336 | if (IS_ERR(phy->reg_base)) { | ||
| 337 | dev_err(dev, "%s: failed to map phy regulator base\n", | ||
| 338 | __func__); | ||
| 339 | ret = -ENOMEM; | ||
| 340 | goto fail; | ||
| 341 | } | ||
| 342 | |||
| 343 | ret = dsi_phy_regulator_init(phy); | 479 | ret = dsi_phy_regulator_init(phy); |
| 344 | if (ret) { | 480 | if (ret) { |
| 345 | dev_err(dev, "%s: failed to init regulator\n", __func__); | 481 | dev_err(dev, "%s: failed to init regulator\n", __func__); |
| @@ -353,6 +489,12 @@ static int dsi_phy_driver_probe(struct platform_device *pdev) | |||
| 353 | goto fail; | 489 | goto fail; |
| 354 | } | 490 | } |
| 355 | 491 | ||
| 492 | if (phy->cfg->ops.init) { | ||
| 493 | ret = phy->cfg->ops.init(phy); | ||
| 494 | if (ret) | ||
| 495 | goto fail; | ||
| 496 | } | ||
| 497 | |||
| 356 | /* PLL init will call into clk_register which requires | 498 | /* PLL init will call into clk_register which requires |
| 357 | * register access, so we need to enable power and ahb clock. | 499 | * register access, so we need to enable power and ahb clock. |
| 358 | */ | 500 | */ |
| @@ -410,7 +552,7 @@ void __exit msm_dsi_phy_driver_unregister(void) | |||
| 410 | } | 552 | } |
| 411 | 553 | ||
| 412 | int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, | 554 | int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, |
| 413 | const unsigned long bit_rate, const unsigned long esc_rate) | 555 | struct msm_dsi_phy_clk_request *clk_req) |
| 414 | { | 556 | { |
| 415 | struct device *dev = &phy->pdev->dev; | 557 | struct device *dev = &phy->pdev->dev; |
| 416 | int ret; | 558 | int ret; |
| @@ -418,21 +560,52 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, | |||
| 418 | if (!phy || !phy->cfg->ops.enable) | 560 | if (!phy || !phy->cfg->ops.enable) |
| 419 | return -EINVAL; | 561 | return -EINVAL; |
| 420 | 562 | ||
| 563 | ret = dsi_phy_enable_resource(phy); | ||
| 564 | if (ret) { | ||
| 565 | dev_err(dev, "%s: resource enable failed, %d\n", | ||
| 566 | __func__, ret); | ||
| 567 | goto res_en_fail; | ||
| 568 | } | ||
| 569 | |||
| 421 | ret = dsi_phy_regulator_enable(phy); | 570 | ret = dsi_phy_regulator_enable(phy); |
| 422 | if (ret) { | 571 | if (ret) { |
| 423 | dev_err(dev, "%s: regulator enable failed, %d\n", | 572 | dev_err(dev, "%s: regulator enable failed, %d\n", |
| 424 | __func__, ret); | 573 | __func__, ret); |
| 425 | return ret; | 574 | goto reg_en_fail; |
| 426 | } | 575 | } |
| 427 | 576 | ||
| 428 | ret = phy->cfg->ops.enable(phy, src_pll_id, bit_rate, esc_rate); | 577 | ret = phy->cfg->ops.enable(phy, src_pll_id, clk_req); |
| 429 | if (ret) { | 578 | if (ret) { |
| 430 | dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret); | 579 | dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret); |
| 431 | dsi_phy_regulator_disable(phy); | 580 | goto phy_en_fail; |
| 432 | return ret; | 581 | } |
| 582 | |||
| 583 | /* | ||
| 584 | * Resetting the DSI PHY silently changes its PLL registers to their | ||
| 585 | * reset state, which will confuse the clock driver and result in wrong | ||
| 586 | * output rates for the link clocks. Restore the PLL state if this | ||
| 587 | * PHY's PLL is being used as a clock source. | ||
| 588 | */ | ||
| 589 | if (phy->usecase != MSM_DSI_PHY_SLAVE) { | ||
| 590 | ret = msm_dsi_pll_restore_state(phy->pll); | ||
| 591 | if (ret) { | ||
| 592 | dev_err(dev, "%s: failed to restore pll state, %d\n", | ||
| 593 | __func__, ret); | ||
| 594 | goto pll_restor_fail; | ||
| 595 | } | ||
| 433 | } | 596 | } |
| 434 | 597 | ||
| 435 | return 0; | 598 | return 0; |
| 599 | |||
| 600 | pll_restor_fail: | ||
| 601 | if (phy->cfg->ops.disable) | ||
| 602 | phy->cfg->ops.disable(phy); | ||
| 603 | phy_en_fail: | ||
| 604 | dsi_phy_regulator_disable(phy); | ||
| 605 | reg_en_fail: | ||
| 606 | dsi_phy_disable_resource(phy); | ||
| 607 | res_en_fail: | ||
| 608 | return ret; | ||
| 436 | } | 609 | } |
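The enable path now takes a `msm_dsi_phy_clk_request` instead of raw bit/escape clock rates, and unwinds each completed step in reverse order on failure. A minimal caller-side sketch; the request field names are assumptions not confirmed by this hunk:

```c
static int phy_enable_example(struct msm_dsi_phy *phy, int src_pll_id)
{
	/* field names below are assumptions, not confirmed by this diff */
	struct msm_dsi_phy_clk_request clk_req = {
		.bitclk_rate = 750000000,  /* HS bit clock in Hz (assumed) */
		.escclk_rate = 19200000,   /* escape clock in Hz (assumed) */
	};

	/* single-PHY case; dual DSI uses MASTER/SLAVE, see below */
	msm_dsi_phy_set_usecase(phy, MSM_DSI_PHY_STANDALONE);

	return msm_dsi_phy_enable(phy, src_pll_id, &clk_req);
}
```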
| 437 | 610 | ||
| 438 | void msm_dsi_phy_disable(struct msm_dsi_phy *phy) | 611 | void msm_dsi_phy_disable(struct msm_dsi_phy *phy) |
| @@ -440,21 +613,21 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy) | |||
| 440 | if (!phy || !phy->cfg->ops.disable) | 613 | if (!phy || !phy->cfg->ops.disable) |
| 441 | return; | 614 | return; |
| 442 | 615 | ||
| 616 | /* Save PLL status if it is a clock source */ | ||
| 617 | if (phy->usecase != MSM_DSI_PHY_SLAVE) | ||
| 618 | msm_dsi_pll_save_state(phy->pll); | ||
| 619 | |||
| 443 | phy->cfg->ops.disable(phy); | 620 | phy->cfg->ops.disable(phy); |
| 444 | 621 | ||
| 445 | dsi_phy_regulator_disable(phy); | 622 | dsi_phy_regulator_disable(phy); |
| 623 | dsi_phy_disable_resource(phy); | ||
| 446 | } | 624 | } |
| 447 | 625 | ||
| 448 | void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy, | 626 | void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy, |
| 449 | u32 *clk_pre, u32 *clk_post) | 627 | struct msm_dsi_phy_shared_timings *shared_timings) |
| 450 | { | 628 | { |
| 451 | if (!phy) | 629 | memcpy(shared_timings, &phy->timing.shared_timings, |
| 452 | return; | 630 | sizeof(*shared_timings)); |
| 453 | |||
| 454 | if (clk_pre) | ||
| 455 | *clk_pre = phy->timing.clk_pre; | ||
| 456 | if (clk_post) | ||
| 457 | *clk_post = phy->timing.clk_post; | ||
| 458 | } | 631 | } |
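Replacing the two out-parameters with a struct copy means new timing fields (such as `clk_pre_inc_by_2`) travel along for free. A hedged consumer sketch:

```c
/* Sketch of a consumer fetching the PHY/host shared timings in one
 * shot; what the host programs with them afterwards is elided. */
static void host_fetch_timings_example(struct msm_dsi_phy *phy)
{
	struct msm_dsi_phy_shared_timings timings;

	msm_dsi_phy_get_shared_timings(phy, &timings);
	DBG("clk_pre=%u clk_post=%u inc_by_2=%u",
	    timings.clk_pre, timings.clk_post, timings.clk_pre_inc_by_2);
}
```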
| 459 | 632 | ||
| 460 | struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy) | 633 | struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy) |
| @@ -465,3 +638,9 @@ struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy) | |||
| 465 | return phy->pll; | 638 | return phy->pll; |
| 466 | } | 639 | } |
| 467 | 640 | ||
| 641 | void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy, | ||
| 642 | enum msm_dsi_phy_usecase uc) | ||
| 643 | { | ||
| 644 | if (phy) | ||
| 645 | phy->usecase = uc; | ||
| 646 | } | ||
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index f24a85439b94..1733f6608a09 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | |||
| @@ -22,8 +22,9 @@ | |||
| 22 | #define dsi_phy_write(offset, data) msm_writel((data), (offset)) | 22 | #define dsi_phy_write(offset, data) msm_writel((data), (offset)) |
| 23 | 23 | ||
| 24 | struct msm_dsi_phy_ops { | 24 | struct msm_dsi_phy_ops { |
| 25 | int (*init) (struct msm_dsi_phy *phy); | ||
| 25 | int (*enable)(struct msm_dsi_phy *phy, int src_pll_id, | 26 | int (*enable)(struct msm_dsi_phy *phy, int src_pll_id, |
| 26 | const unsigned long bit_rate, const unsigned long esc_rate); | 27 | struct msm_dsi_phy_clk_request *clk_req); |
| 27 | void (*disable)(struct msm_dsi_phy *phy); | 28 | void (*disable)(struct msm_dsi_phy *phy); |
| 28 | }; | 29 | }; |
| 29 | 30 | ||
| @@ -46,6 +47,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs; | |||
| 46 | extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs; | 47 | extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs; |
| 47 | extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs; | 48 | extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs; |
| 48 | extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs; | 49 | extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs; |
| 50 | extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs; | ||
| 49 | 51 | ||
| 50 | struct msm_dsi_dphy_timing { | 52 | struct msm_dsi_dphy_timing { |
| 51 | u32 clk_pre; | 53 | u32 clk_pre; |
| @@ -61,12 +63,22 @@ struct msm_dsi_dphy_timing { | |||
| 61 | u32 ta_go; | 63 | u32 ta_go; |
| 62 | u32 ta_sure; | 64 | u32 ta_sure; |
| 63 | u32 ta_get; | 65 | u32 ta_get; |
| 66 | |||
| 67 | struct msm_dsi_phy_shared_timings shared_timings; | ||
| 68 | |||
| 69 | /* For PHY v2 only */ | ||
| 70 | u32 hs_rqst_ckln; | ||
| 71 | u32 hs_prep_dly; | ||
| 72 | u32 hs_prep_dly_ckln; | ||
| 73 | u8 hs_halfbyte_en; | ||
| 74 | u8 hs_halfbyte_en_ckln; | ||
| 64 | }; | 75 | }; |
| 65 | 76 | ||
| 66 | struct msm_dsi_phy { | 77 | struct msm_dsi_phy { |
| 67 | struct platform_device *pdev; | 78 | struct platform_device *pdev; |
| 68 | void __iomem *base; | 79 | void __iomem *base; |
| 69 | void __iomem *reg_base; | 80 | void __iomem *reg_base; |
| 81 | void __iomem *lane_base; | ||
| 70 | int id; | 82 | int id; |
| 71 | 83 | ||
| 72 | struct clk *ahb_clk; | 84 | struct clk *ahb_clk; |
| @@ -75,6 +87,7 @@ struct msm_dsi_phy { | |||
| 75 | struct msm_dsi_dphy_timing timing; | 87 | struct msm_dsi_dphy_timing timing; |
| 76 | const struct msm_dsi_phy_cfg *cfg; | 88 | const struct msm_dsi_phy_cfg *cfg; |
| 77 | 89 | ||
| 90 | enum msm_dsi_phy_usecase usecase; | ||
| 78 | bool regulator_ldo_mode; | 91 | bool regulator_ldo_mode; |
| 79 | 92 | ||
| 80 | struct msm_dsi_pll *pll; | 93 | struct msm_dsi_pll *pll; |
| @@ -84,9 +97,12 @@ struct msm_dsi_phy { | |||
| 84 | * PHY internal functions | 97 | * PHY internal functions |
| 85 | */ | 98 | */ |
| 86 | int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, | 99 | int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, |
| 87 | const unsigned long bit_rate, const unsigned long esc_rate); | 100 | struct msm_dsi_phy_clk_request *clk_req); |
| 101 | int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing, | ||
| 102 | struct msm_dsi_phy_clk_request *clk_req); | ||
| 88 | void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, | 103 | void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, |
| 89 | u32 bit_mask); | 104 | u32 bit_mask); |
| 105 | int msm_dsi_phy_init_common(struct msm_dsi_phy *phy); | ||
| 90 | 106 | ||
| 91 | #endif /* __DSI_PHY_H__ */ | 107 | #endif /* __DSI_PHY_H__ */ |
| 92 | 108 | ||
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c new file mode 100644 index 000000000000..513f4234adc1 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c | |||
| @@ -0,0 +1,169 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016, The Linux Foundation. All rights reserved. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License version 2 and | ||
| 6 | * only version 2 as published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, | ||
| 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 11 | * GNU General Public License for more details. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include "dsi_phy.h" | ||
| 15 | #include "dsi.xml.h" | ||
| 16 | |||
| 17 | #define PHY_14NM_CKLN_IDX 4 | ||
| 18 | |||
| 19 | static void dsi_14nm_dphy_set_timing(struct msm_dsi_phy *phy, | ||
| 20 | struct msm_dsi_dphy_timing *timing, | ||
| 21 | int lane_idx) | ||
| 22 | { | ||
| 23 | void __iomem *base = phy->lane_base; | ||
| 24 | bool clk_ln = (lane_idx == PHY_14NM_CKLN_IDX); | ||
| 25 | u32 zero = clk_ln ? timing->clk_zero : timing->hs_zero; | ||
| 26 | u32 prepare = clk_ln ? timing->clk_prepare : timing->hs_prepare; | ||
| 27 | u32 trail = clk_ln ? timing->clk_trail : timing->hs_trail; | ||
| 28 | u32 rqst = clk_ln ? timing->hs_rqst_ckln : timing->hs_rqst; | ||
| 29 | u32 prep_dly = clk_ln ? timing->hs_prep_dly_ckln : timing->hs_prep_dly; | ||
| 30 | u32 halfbyte_en = clk_ln ? timing->hs_halfbyte_en_ckln : | ||
| 31 | timing->hs_halfbyte_en; | ||
| 32 | |||
| 33 | dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(lane_idx), | ||
| 34 | DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(timing->hs_exit)); | ||
| 35 | dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(lane_idx), | ||
| 36 | DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(zero)); | ||
| 37 | dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(lane_idx), | ||
| 38 | DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(prepare)); | ||
| 39 | dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(lane_idx), | ||
| 40 | DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(trail)); | ||
| 41 | dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(lane_idx), | ||
| 42 | DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(rqst)); | ||
| 43 | dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG0(lane_idx), | ||
| 44 | DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(prep_dly)); | ||
| 45 | dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG1(lane_idx), | ||
| 46 | halfbyte_en ? DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN : 0); | ||
| 47 | dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(lane_idx), | ||
| 48 | DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(timing->ta_go) | | ||
| 49 | DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(timing->ta_sure)); | ||
| 50 | dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(lane_idx), | ||
| 51 | DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(timing->ta_get)); | ||
| 52 | dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(lane_idx), | ||
| 53 | DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(0xa0)); | ||
| 54 | } | ||
| 55 | |||
| 56 | static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, | ||
| 57 | struct msm_dsi_phy_clk_request *clk_req) | ||
| 58 | { | ||
| 59 | struct msm_dsi_dphy_timing *timing = &phy->timing; | ||
| 60 | u32 data; | ||
| 61 | int i; | ||
| 62 | int ret; | ||
| 63 | void __iomem *base = phy->base; | ||
| 64 | void __iomem *lane_base = phy->lane_base; | ||
| 65 | |||
| 66 | if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) { | ||
| 67 | dev_err(&phy->pdev->dev, | ||
| 68 | "%s: D-PHY timing calculation failed\n", __func__); | ||
| 69 | return -EINVAL; | ||
| 70 | } | ||
| 71 | |||
| 72 | data = 0x1c; | ||
| 73 | if (phy->usecase != MSM_DSI_PHY_STANDALONE) | ||
| 74 | data |= DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(32); | ||
| 75 | dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data); | ||
| 76 | |||
| 77 | dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0x1); | ||
| 78 | |||
| 79 | /* 4 data lanes + 1 clk lane configuration */ | ||
| 80 | for (i = 0; i < 5; i++) { | ||
| 81 | dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_VREG_CNTRL(i), | ||
| 82 | 0x1d); | ||
| 83 | |||
| 84 | dsi_phy_write(lane_base + | ||
| 85 | REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(i), 0xff); | ||
| 86 | dsi_phy_write(lane_base + | ||
| 87 | REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(i), | ||
| 88 | (i == PHY_14NM_CKLN_IDX) ? 0x00 : 0x06); | ||
| 89 | |||
| 90 | dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG3(i), | ||
| 91 | (i == PHY_14NM_CKLN_IDX) ? 0x8f : 0x0f); | ||
| 92 | dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG2(i), 0x10); | ||
| 93 | dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_DATAPATH(i), | ||
| 94 | 0); | ||
| 95 | dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_STR(i), | ||
| 96 | 0x88); | ||
| 97 | |||
| 98 | dsi_14nm_dphy_set_timing(phy, timing, i); | ||
| 99 | } | ||
| 100 | |||
| 101 | /* Make sure PLL is not started */ | ||
| 102 | dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0x00); | ||
| 103 | |||
| 104 | wmb(); /* make sure everything is written before reset and enable */ | ||
| 105 | |||
| 106 | /* reset digital block */ | ||
| 107 | dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x80); | ||
| 108 | wmb(); /* ensure reset is asserted */ | ||
| 109 | udelay(100); | ||
| 110 | dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x00); | ||
| 111 | |||
| 112 | msm_dsi_phy_set_src_pll(phy, src_pll_id, | ||
| 113 | REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, | ||
| 114 | DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL); | ||
| 115 | |||
| 116 | ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase); | ||
| 117 | if (ret) { | ||
| 118 | dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n", | ||
| 119 | __func__, ret); | ||
| 120 | return ret; | ||
| 121 | } | ||
| 122 | |||
| 123 | /* Remove power down from PLL and all lanes */ | ||
| 124 | dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0xff); | ||
| 125 | |||
| 126 | return 0; | ||
| 127 | } | ||
| 128 | |||
| 129 | static void dsi_14nm_phy_disable(struct msm_dsi_phy *phy) | ||
| 130 | { | ||
| 131 | dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0); | ||
| 132 | dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0); | ||
| 133 | |||
| 134 | /* ensure that the phy is completely disabled */ | ||
| 135 | wmb(); | ||
| 136 | } | ||
| 137 | |||
| 138 | static int dsi_14nm_phy_init(struct msm_dsi_phy *phy) | ||
| 139 | { | ||
| 140 | struct platform_device *pdev = phy->pdev; | ||
| 141 | |||
| 142 | phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane", | ||
| 143 | "DSI_PHY_LANE"); | ||
| 144 | if (IS_ERR(phy->lane_base)) { | ||
| 145 | dev_err(&pdev->dev, "%s: failed to map phy lane base\n", | ||
| 146 | __func__); | ||
| 147 | return -ENOMEM; | ||
| 148 | } | ||
| 149 | |||
| 150 | return 0; | ||
| 151 | } | ||
| 152 | |||
| 153 | const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = { | ||
| 154 | .type = MSM_DSI_PHY_14NM, | ||
| 155 | .src_pll_truthtable = { {false, false}, {true, false} }, | ||
| 156 | .reg_cfg = { | ||
| 157 | .num = 1, | ||
| 158 | .regs = { | ||
| 159 | {"vcca", 17000, 32}, | ||
| 160 | }, | ||
| 161 | }, | ||
| 162 | .ops = { | ||
| 163 | .enable = dsi_14nm_phy_enable, | ||
| 164 | .disable = dsi_14nm_phy_disable, | ||
| 165 | .init = dsi_14nm_phy_init, | ||
| 166 | }, | ||
| 167 | .io_start = { 0x994400, 0x996400 }, | ||
| 168 | .num_dsi_phy = 2, | ||
| 169 | }; | ||
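The cfg's `io_start` table doubles as an index: probe code can derive the PHY instance id by matching the device's MMIO base against it. An illustrative sketch of the idea behind `dsi_phy_get_id()`, whose body is not shown in this hunk:

```c
/* Illustrative only: map an MMIO base back to a PHY index using the
 * cfg's io_start[] table, mirroring what dsi_phy_get_id() does. */
static int phy_id_from_base(const struct msm_dsi_phy_cfg *cfg,
			    resource_size_t base)
{
	int i;

	for (i = 0; i < cfg->num_dsi_phy; i++)
		if (cfg->io_start[i] == base)
			return i;

	return -EINVAL;
}
```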
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c index c757e2070cac..1ca6c69516f5 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c | |||
| @@ -72,7 +72,7 @@ static void dsi_20nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable) | |||
| 72 | } | 72 | } |
| 73 | 73 | ||
| 74 | static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, | 74 | static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, |
| 75 | const unsigned long bit_rate, const unsigned long esc_rate) | 75 | struct msm_dsi_phy_clk_request *clk_req) |
| 76 | { | 76 | { |
| 77 | struct msm_dsi_dphy_timing *timing = &phy->timing; | 77 | struct msm_dsi_dphy_timing *timing = &phy->timing; |
| 78 | int i; | 78 | int i; |
| @@ -81,7 +81,7 @@ static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, | |||
| 81 | 81 | ||
| 82 | DBG(""); | 82 | DBG(""); |
| 83 | 83 | ||
| 84 | if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) { | 84 | if (msm_dsi_dphy_timing_calc(timing, clk_req)) { |
| 85 | dev_err(&phy->pdev->dev, | 85 | dev_err(&phy->pdev->dev, |
| 86 | "%s: D-PHY timing calculation failed\n", __func__); | 86 | "%s: D-PHY timing calculation failed\n", __func__); |
| 87 | return -EINVAL; | 87 | return -EINVAL; |
| @@ -145,6 +145,7 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = { | |||
| 145 | .ops = { | 145 | .ops = { |
| 146 | .enable = dsi_20nm_phy_enable, | 146 | .enable = dsi_20nm_phy_enable, |
| 147 | .disable = dsi_20nm_phy_disable, | 147 | .disable = dsi_20nm_phy_disable, |
| 148 | .init = msm_dsi_phy_init_common, | ||
| 148 | }, | 149 | }, |
| 149 | .io_start = { 0xfd998300, 0xfd9a0300 }, | 150 | .io_start = { 0xfd998300, 0xfd9a0300 }, |
| 150 | .num_dsi_phy = 2, | 151 | .num_dsi_phy = 2, |
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c index 63d7fba31380..4972b52cbe44 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c | |||
| @@ -67,7 +67,7 @@ static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable) | |||
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, | 69 | static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, |
| 70 | const unsigned long bit_rate, const unsigned long esc_rate) | 70 | struct msm_dsi_phy_clk_request *clk_req) |
| 71 | { | 71 | { |
| 72 | struct msm_dsi_dphy_timing *timing = &phy->timing; | 72 | struct msm_dsi_dphy_timing *timing = &phy->timing; |
| 73 | int i; | 73 | int i; |
| @@ -75,7 +75,7 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, | |||
| 75 | 75 | ||
| 76 | DBG(""); | 76 | DBG(""); |
| 77 | 77 | ||
| 78 | if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) { | 78 | if (msm_dsi_dphy_timing_calc(timing, clk_req)) { |
| 79 | dev_err(&phy->pdev->dev, | 79 | dev_err(&phy->pdev->dev, |
| 80 | "%s: D-PHY timing calculation failed\n", __func__); | 80 | "%s: D-PHY timing calculation failed\n", __func__); |
| 81 | return -EINVAL; | 81 | return -EINVAL; |
| @@ -144,6 +144,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = { | |||
| 144 | .ops = { | 144 | .ops = { |
| 145 | .enable = dsi_28nm_phy_enable, | 145 | .enable = dsi_28nm_phy_enable, |
| 146 | .disable = dsi_28nm_phy_disable, | 146 | .disable = dsi_28nm_phy_disable, |
| 147 | .init = msm_dsi_phy_init_common, | ||
| 147 | }, | 148 | }, |
| 148 | .io_start = { 0xfd922b00, 0xfd923100 }, | 149 | .io_start = { 0xfd922b00, 0xfd923100 }, |
| 149 | .num_dsi_phy = 2, | 150 | .num_dsi_phy = 2, |
| @@ -161,6 +162,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = { | |||
| 161 | .ops = { | 162 | .ops = { |
| 162 | .enable = dsi_28nm_phy_enable, | 163 | .enable = dsi_28nm_phy_enable, |
| 163 | .disable = dsi_28nm_phy_disable, | 164 | .disable = dsi_28nm_phy_disable, |
| 165 | .init = msm_dsi_phy_init_common, | ||
| 164 | }, | 166 | }, |
| 165 | .io_start = { 0x1a98500 }, | 167 | .io_start = { 0x1a98500 }, |
| 166 | .num_dsi_phy = 1, | 168 | .num_dsi_phy = 1, |
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c index 7bdb9de54968..398004463498 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c | |||
| @@ -124,14 +124,14 @@ static void dsi_28nm_phy_lane_config(struct msm_dsi_phy *phy) | |||
| 124 | } | 124 | } |
| 125 | 125 | ||
| 126 | static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, | 126 | static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, |
| 127 | const unsigned long bit_rate, const unsigned long esc_rate) | 127 | struct msm_dsi_phy_clk_request *clk_req) |
| 128 | { | 128 | { |
| 129 | struct msm_dsi_dphy_timing *timing = &phy->timing; | 129 | struct msm_dsi_dphy_timing *timing = &phy->timing; |
| 130 | void __iomem *base = phy->base; | 130 | void __iomem *base = phy->base; |
| 131 | 131 | ||
| 132 | DBG(""); | 132 | DBG(""); |
| 133 | 133 | ||
| 134 | if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) { | 134 | if (msm_dsi_dphy_timing_calc(timing, clk_req)) { |
| 135 | dev_err(&phy->pdev->dev, | 135 | dev_err(&phy->pdev->dev, |
| 136 | "%s: D-PHY timing calculation failed\n", __func__); | 136 | "%s: D-PHY timing calculation failed\n", __func__); |
| 137 | return -EINVAL; | 137 | return -EINVAL; |
| @@ -191,6 +191,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = { | |||
| 191 | .ops = { | 191 | .ops = { |
| 192 | .enable = dsi_28nm_phy_enable, | 192 | .enable = dsi_28nm_phy_enable, |
| 193 | .disable = dsi_28nm_phy_disable, | 193 | .disable = dsi_28nm_phy_disable, |
| 194 | .init = msm_dsi_phy_init_common, | ||
| 194 | }, | 195 | }, |
| 195 | .io_start = { 0x4700300, 0x5800300 }, | 196 | .io_start = { 0x4700300, 0x5800300 }, |
| 196 | .num_dsi_phy = 2, | 197 | .num_dsi_phy = 2, |
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c index 5cd438f91afe..bc289f5c9078 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c | |||
| @@ -140,6 +140,15 @@ int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll) | |||
| 140 | return 0; | 140 | return 0; |
| 141 | } | 141 | } |
| 142 | 142 | ||
| 143 | int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll, | ||
| 144 | enum msm_dsi_phy_usecase uc) | ||
| 145 | { | ||
| 146 | if (pll->set_usecase) | ||
| 147 | return pll->set_usecase(pll, uc); | ||
| 148 | |||
| 149 | return 0; | ||
| 150 | } | ||
| 151 | |||
| 143 | struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, | 152 | struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, |
| 144 | enum msm_dsi_phy_type type, int id) | 153 | enum msm_dsi_phy_type type, int id) |
| 145 | { | 154 | { |
| @@ -154,6 +163,9 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, | |||
| 154 | case MSM_DSI_PHY_28NM_8960: | 163 | case MSM_DSI_PHY_28NM_8960: |
| 155 | pll = msm_dsi_pll_28nm_8960_init(pdev, id); | 164 | pll = msm_dsi_pll_28nm_8960_init(pdev, id); |
| 156 | break; | 165 | break; |
| 166 | case MSM_DSI_PHY_14NM: | ||
| 167 | pll = msm_dsi_pll_14nm_init(pdev, id); | ||
| 168 | break; | ||
| 157 | default: | 169 | default: |
| 158 | pll = ERR_PTR(-ENXIO); | 170 | pll = ERR_PTR(-ENXIO); |
| 159 | break; | 171 | break; |
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h index 2cf1664723e8..f63e7ada74a8 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h | |||
| @@ -41,6 +41,8 @@ struct msm_dsi_pll { | |||
| 41 | void (*destroy)(struct msm_dsi_pll *pll); | 41 | void (*destroy)(struct msm_dsi_pll *pll); |
| 42 | void (*save_state)(struct msm_dsi_pll *pll); | 42 | void (*save_state)(struct msm_dsi_pll *pll); |
| 43 | int (*restore_state)(struct msm_dsi_pll *pll); | 43 | int (*restore_state)(struct msm_dsi_pll *pll); |
| 44 | int (*set_usecase)(struct msm_dsi_pll *pll, | ||
| 45 | enum msm_dsi_phy_usecase uc); | ||
| 44 | }; | 46 | }; |
| 45 | 47 | ||
| 46 | #define hw_clk_to_pll(x) container_of(x, struct msm_dsi_pll, clk_hw) | 48 | #define hw_clk_to_pll(x) container_of(x, struct msm_dsi_pll, clk_hw) |
| @@ -104,5 +106,14 @@ static inline struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init( | |||
| 104 | } | 106 | } |
| 105 | #endif | 107 | #endif |
| 106 | 108 | ||
| 109 | #ifdef CONFIG_DRM_MSM_DSI_14NM_PHY | ||
| 110 | struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id); | ||
| 111 | #else | ||
| 112 | static inline struct msm_dsi_pll * | ||
| 113 | msm_dsi_pll_14nm_init(struct platform_device *pdev, int id) | ||
| 114 | { | ||
| 115 | return ERR_PTR(-ENODEV); | ||
| 116 | } | ||
| 117 | #endif | ||
| 107 | #endif /* __DSI_PLL_H__ */ | 118 | #endif /* __DSI_PLL_H__ */ |
| 108 | 119 | ||
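The `#else` stub keeps `msm_dsi_pll_init()` compilable when the 14nm PLL is configured out; callers only ever see an `ERR_PTR()`. A hedged caller sketch:

```c
/* Sketch: the factory returns ERR_PTR(-ENODEV) when the 14nm PLL is
 * compiled out and ERR_PTR(-ENXIO) for an unknown PHY type. Treating
 * the failure as non-fatal is an assumption here, not shown in the
 * diff. */
static struct msm_dsi_pll *pll_init_example(struct platform_device *pdev,
					    int id)
{
	struct msm_dsi_pll *pll = msm_dsi_pll_init(pdev, MSM_DSI_PHY_14NM, id);

	if (IS_ERR(pll))
		dev_info(&pdev->dev, "DSI PLL init failed: %ld\n",
			 PTR_ERR(pll));

	return pll;
}
```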
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c new file mode 100644 index 000000000000..fe15aa64086f --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c | |||
| @@ -0,0 +1,1104 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016, The Linux Foundation. All rights reserved. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License version 2 and | ||
| 6 | * only version 2 as published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, | ||
| 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 11 | * GNU General Public License for more details. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <linux/clk.h> | ||
| 15 | #include <linux/clk-provider.h> | ||
| 16 | |||
| 17 | #include "dsi_pll.h" | ||
| 18 | #include "dsi.xml.h" | ||
| 19 | |||
| 20 | /* | ||
| 21 | * DSI PLL 14nm - clock diagram (eg: DSI0): | ||
| 22 | * | ||
| 23 | * dsi0n1_postdiv_clk | ||
| 24 | * | | ||
| 25 | * | | ||
| 26 | * +----+ | +----+ | ||
| 27 | * dsi0vco_clk ---| n1 |--o--| /8 |-- dsi0pllbyte | ||
| 28 | * +----+ | +----+ | ||
| 29 | * | dsi0n1_postdivby2_clk | ||
| 30 | * | +----+ | | ||
| 31 | * o---| /2 |--o--|\ | ||
| 32 | * | +----+ | \ +----+ | ||
| 33 | * | | |--| n2 |-- dsi0pll | ||
| 34 | * o--------------| / +----+ | ||
| 35 | * |/ | ||
| 36 | */ | ||
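To make the diagram concrete, here is a sketch (not driver code) of the two output rates as functions of the VCO rate and the N1/N2 post-dividers; `dsiclk_sel = 1` selects the /2 path, per `dsi_pll_14nm_input_init()` below:

```c
/* Worked example of the clock tree above: VCO = 1.5 GHz, N1 = 1,
 * N2 = 5 gives a 187.5 MHz byte clock and a 150 MHz pixel clock. */
static unsigned long dsi0pllbyte_rate(unsigned long vco, unsigned int n1)
{
	return vco / n1 / 8;		/* n1 post-divider, then /8 */
}

static unsigned long dsi0pll_rate(unsigned long vco, unsigned int n1,
				  unsigned int n2)
{
	return vco / n1 / 2 / n2;	/* /2 path into the n2 divider */
}
```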
| 37 | |||
| 38 | #define POLL_MAX_READS 15 | ||
| 39 | #define POLL_TIMEOUT_US 1000 | ||
| 40 | |||
| 41 | #define NUM_PROVIDED_CLKS 2 | ||
| 42 | |||
| 43 | #define VCO_REF_CLK_RATE 19200000 | ||
| 44 | #define VCO_MIN_RATE 1300000000UL | ||
| 45 | #define VCO_MAX_RATE 2600000000UL | ||
| 46 | |||
| 47 | #define DSI_BYTE_PLL_CLK 0 | ||
| 48 | #define DSI_PIXEL_PLL_CLK 1 | ||
| 49 | |||
| 50 | #define DSI_PLL_DEFAULT_VCO_POSTDIV 1 | ||
| 51 | |||
| 52 | struct dsi_pll_input { | ||
| 53 | u32 fref; /* reference clk */ | ||
| 54 | u32 fdata; /* bit clock rate */ | ||
| 55 | u32 dsiclk_sel; /* Mux configuration (see diagram) */ | ||
| 56 | u32 ssc_en; /* SSC enable/disable */ | ||
| 57 | u32 ldo_en; | ||
| 58 | |||
| 59 | /* fixed params */ | ||
| 60 | u32 refclk_dbler_en; | ||
| 61 | u32 vco_measure_time; | ||
| 62 | u32 kvco_measure_time; | ||
| 63 | u32 bandgap_timer; | ||
| 64 | u32 pll_wakeup_timer; | ||
| 65 | u32 plllock_cnt; | ||
| 66 | u32 plllock_rng; | ||
| 67 | u32 ssc_center; | ||
| 68 | u32 ssc_adj_period; | ||
| 69 | u32 ssc_spread; | ||
| 70 | u32 ssc_freq; | ||
| 71 | u32 pll_ie_trim; | ||
| 72 | u32 pll_ip_trim; | ||
| 73 | u32 pll_iptat_trim; | ||
| 74 | u32 pll_cpcset_cur; | ||
| 75 | u32 pll_cpmset_cur; | ||
| 76 | |||
| 77 | u32 pll_icpmset; | ||
| 78 | u32 pll_icpcset; | ||
| 79 | |||
| 80 | u32 pll_icpmset_p; | ||
| 81 | u32 pll_icpmset_m; | ||
| 82 | |||
| 83 | u32 pll_icpcset_p; | ||
| 84 | u32 pll_icpcset_m; | ||
| 85 | |||
| 86 | u32 pll_lpf_res1; | ||
| 87 | u32 pll_lpf_cap1; | ||
| 88 | u32 pll_lpf_cap2; | ||
| 89 | u32 pll_c3ctrl; | ||
| 90 | u32 pll_r3ctrl; | ||
| 91 | }; | ||
| 92 | |||
| 93 | struct dsi_pll_output { | ||
| 94 | u32 pll_txclk_en; | ||
| 95 | u32 dec_start; | ||
| 96 | u32 div_frac_start; | ||
| 97 | u32 ssc_period; | ||
| 98 | u32 ssc_step_size; | ||
| 99 | u32 plllock_cmp; | ||
| 100 | u32 pll_vco_div_ref; | ||
| 101 | u32 pll_vco_count; | ||
| 102 | u32 pll_kvco_div_ref; | ||
| 103 | u32 pll_kvco_count; | ||
| 104 | u32 pll_misc1; | ||
| 105 | u32 pll_lpf2_postdiv; | ||
| 106 | u32 pll_resetsm_cntrl; | ||
| 107 | u32 pll_resetsm_cntrl2; | ||
| 108 | u32 pll_resetsm_cntrl5; | ||
| 109 | u32 pll_kvco_code; | ||
| 110 | |||
| 111 | u32 cmn_clk_cfg0; | ||
| 112 | u32 cmn_clk_cfg1; | ||
| 113 | u32 cmn_ldo_cntrl; | ||
| 114 | |||
| 115 | u32 pll_postdiv; | ||
| 116 | u32 fcvo; | ||
| 117 | }; | ||
| 118 | |||
| 119 | struct pll_14nm_cached_state { | ||
| 120 | unsigned long vco_rate; | ||
| 121 | u8 n2postdiv; | ||
| 122 | u8 n1postdiv; | ||
| 123 | }; | ||
| 124 | |||
| 125 | struct dsi_pll_14nm { | ||
| 126 | struct msm_dsi_pll base; | ||
| 127 | |||
| 128 | int id; | ||
| 129 | struct platform_device *pdev; | ||
| 130 | |||
| 131 | void __iomem *phy_cmn_mmio; | ||
| 132 | void __iomem *mmio; | ||
| 133 | |||
| 134 | int vco_delay; | ||
| 135 | |||
| 136 | struct dsi_pll_input in; | ||
| 137 | struct dsi_pll_output out; | ||
| 138 | |||
| 139 | /* protects REG_DSI_14nm_PHY_CMN_CLK_CFG0 register */ | ||
| 140 | spinlock_t postdiv_lock; | ||
| 141 | |||
| 142 | u64 vco_current_rate; | ||
| 143 | u64 vco_ref_clk_rate; | ||
| 144 | |||
| 145 | /* private clocks: */ | ||
| 146 | struct clk_hw *hws[NUM_DSI_CLOCKS_MAX]; | ||
| 147 | u32 num_hws; | ||
| 148 | |||
| 149 | /* clock-provider: */ | ||
| 150 | struct clk_hw_onecell_data *hw_data; | ||
| 151 | |||
| 152 | struct pll_14nm_cached_state cached_state; | ||
| 153 | |||
| 154 | enum msm_dsi_phy_usecase uc; | ||
| 155 | struct dsi_pll_14nm *slave; | ||
| 156 | }; | ||
| 157 | |||
| 158 | #define to_pll_14nm(x) container_of(x, struct dsi_pll_14nm, base) | ||
| 159 | |||
| 160 | /* | ||
| 161 | * Private struct for N1/N2 post-divider clocks. These clocks are similar to | ||
| 162 | * the generic clk_divider class of clocks. The only difference is that | ||
| 163 | * they also set the slave DSI PLL's post-dividers when in dual DSI mode. | ||
| 164 | */ | ||
| 165 | struct dsi_pll_14nm_postdiv { | ||
| 166 | struct clk_hw hw; | ||
| 167 | |||
| 168 | /* divider params */ | ||
| 169 | u8 shift; | ||
| 170 | u8 width; | ||
| 171 | u8 flags; /* same flags as used by clk_divider struct */ | ||
| 172 | |||
| 173 | struct dsi_pll_14nm *pll; | ||
| 174 | }; | ||
| 175 | |||
| 176 | #define to_pll_14nm_postdiv(_hw) container_of(_hw, struct dsi_pll_14nm_postdiv, hw) | ||
| 177 | |||
| 178 | /* | ||
| 179 | * Global list of private DSI PLL struct pointers. We need this for Dual DSI | ||
| 180 | * mode, where the master PLL's clk_ops needs to access the slave's private data. | ||
| 181 | */ | ||
| 182 | static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX]; | ||
| 183 | |||
| 184 | static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm, | ||
| 185 | u32 nb_tries, u32 timeout_us) | ||
| 186 | { | ||
| 187 | bool pll_locked = false; | ||
| 188 | void __iomem *base = pll_14nm->mmio; | ||
| 189 | u32 tries, val; | ||
| 190 | |||
| 191 | tries = nb_tries; | ||
| 192 | while (tries--) { | ||
| 193 | val = pll_read(base + | ||
| 194 | REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS); | ||
| 195 | pll_locked = !!(val & BIT(5)); | ||
| 196 | |||
| 197 | if (pll_locked) | ||
| 198 | break; | ||
| 199 | |||
| 200 | udelay(timeout_us); | ||
| 201 | } | ||
| 202 | |||
| 203 | if (!pll_locked) { | ||
| 204 | tries = nb_tries; | ||
| 205 | while (tries--) { | ||
| 206 | val = pll_read(base + | ||
| 207 | REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS); | ||
| 208 | pll_locked = !!(val & BIT(0)); | ||
| 209 | |||
| 210 | if (pll_locked) | ||
| 211 | break; | ||
| 212 | |||
| 213 | udelay(timeout_us); | ||
| 214 | } | ||
| 215 | } | ||
| 216 | |||
| 217 | DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* "); | ||
| 218 | |||
| 219 | return pll_locked; | ||
| 220 | } | ||
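The two-stage poll above (BIT(5) for the reset state machine's ready status, then BIT(0) as a fallback lock indication) could arguably be expressed with the kernel's iopoll helpers. A hedged sketch, not a drop-in: it accepts either bit at any time, and assumes `pll_read()` is a plain register read:

```c
#include <linux/iopoll.h>

/* Hedged alternative: poll the ready/lock bits with readl_poll_timeout()
 * instead of an open-coded retry loop. Unlike the code above, this
 * accepts BIT(5) or BIT(0) in a single pass. */
static bool pll_14nm_locked(void __iomem *status_reg)
{
	u32 val;

	return readl_poll_timeout(status_reg, val,
				  val & (BIT(5) | BIT(0)),
				  POLL_TIMEOUT_US,
				  POLL_MAX_READS * POLL_TIMEOUT_US) == 0;
}
```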
| 221 | |||
| 222 | static void dsi_pll_14nm_input_init(struct dsi_pll_14nm *pll) | ||
| 223 | { | ||
| 224 | pll->in.fref = pll->vco_ref_clk_rate; | ||
| 225 | pll->in.fdata = 0; | ||
| 226 | pll->in.dsiclk_sel = 1; /* Use the /2 path in Mux */ | ||
| 227 | pll->in.ldo_en = 0; /* disabled for now */ | ||
| 228 | |||
| 229 | /* fixed input */ | ||
| 230 | pll->in.refclk_dbler_en = 0; | ||
| 231 | pll->in.vco_measure_time = 5; | ||
| 232 | pll->in.kvco_measure_time = 5; | ||
| 233 | pll->in.bandgap_timer = 4; | ||
| 234 | pll->in.pll_wakeup_timer = 5; | ||
| 235 | pll->in.plllock_cnt = 1; | ||
| 236 | pll->in.plllock_rng = 0; | ||
| 237 | |||
| 238 | /* | ||
| 239 | * SSC is enabled by default. We might need DT props for configuring | ||
| 240 | * some SSC params, such as PPM and center/down spread. | ||
| 241 | */ | ||
| 242 | pll->in.ssc_en = 1; | ||
| 243 | pll->in.ssc_center = 0; /* down spread by default */ | ||
| 244 | pll->in.ssc_spread = 5; /* PPM / 1000 */ | ||
| 245 | pll->in.ssc_freq = 31500; /* default recommended */ | ||
| 246 | pll->in.ssc_adj_period = 37; | ||
| 247 | |||
| 248 | pll->in.pll_ie_trim = 4; | ||
| 249 | pll->in.pll_ip_trim = 4; | ||
| 250 | pll->in.pll_cpcset_cur = 1; | ||
| 251 | pll->in.pll_cpmset_cur = 1; | ||
| 252 | pll->in.pll_icpmset = 4; | ||
| 253 | pll->in.pll_icpcset = 4; | ||
| 254 | pll->in.pll_icpmset_p = 0; | ||
| 255 | pll->in.pll_icpmset_m = 0; | ||
| 256 | pll->in.pll_icpcset_p = 0; | ||
| 257 | pll->in.pll_icpcset_m = 0; | ||
| 258 | pll->in.pll_lpf_res1 = 3; | ||
| 259 | pll->in.pll_lpf_cap1 = 11; | ||
| 260 | pll->in.pll_lpf_cap2 = 1; | ||
| 261 | pll->in.pll_iptat_trim = 7; | ||
| 262 | pll->in.pll_c3ctrl = 2; | ||
| 263 | pll->in.pll_r3ctrl = 1; | ||
| 264 | } | ||
| 265 | |||
| 266 | #define CEIL(x, y) (((x) + ((y) - 1)) / (y)) | ||
| 267 | |||
| 268 | static void pll_14nm_ssc_calc(struct dsi_pll_14nm *pll) | ||
| 269 | { | ||
| 270 | u32 period, ssc_period; | ||
| 271 | u32 ref, rem; | ||
| 272 | u64 step_size; | ||
| 273 | |||
| 274 | DBG("vco=%lld ref=%lld", pll->vco_current_rate, pll->vco_ref_clk_rate); | ||
| 275 | |||
| 276 | ssc_period = pll->in.ssc_freq / 500; | ||
| 277 | period = (u32)pll->vco_ref_clk_rate / 1000; | ||
| 278 | ssc_period = CEIL(period, ssc_period); | ||
| 279 | ssc_period -= 1; | ||
| 280 | pll->out.ssc_period = ssc_period; | ||
| 281 | |||
| 282 | DBG("ssc freq=%d spread=%d period=%d", pll->in.ssc_freq, | ||
| 283 | pll->in.ssc_spread, pll->out.ssc_period); | ||
| 284 | |||
| 285 | step_size = (u32)pll->vco_current_rate; | ||
| 286 | ref = pll->vco_ref_clk_rate; | ||
| 287 | ref /= 1000; | ||
| 288 | step_size = div_u64(step_size, ref); | ||
| 289 | step_size <<= 20; | ||
| 290 | step_size = div_u64(step_size, 1000); | ||
| 291 | step_size *= pll->in.ssc_spread; | ||
| 292 | step_size = div_u64(step_size, 1000); | ||
| 293 | step_size *= (pll->in.ssc_adj_period + 1); | ||
| 294 | |||
| 295 | rem = 0; | ||
| 296 | step_size = div_u64_rem(step_size, ssc_period + 1, &rem); | ||
| 297 | if (rem) | ||
| 298 | step_size++; | ||
| 299 | |||
| 300 | DBG("step_size=%lld", step_size); | ||
| 301 | |||
| 302 | step_size &= 0x0ffff; /* take lower 16 bits */ | ||
| 303 | |||
| 304 | pll->out.ssc_step_size = step_size; | ||
| 305 | } | ||
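The step-size math is easier to follow pulled out of the struct plumbing: the VCO rate is expressed in units of the reference (in kHz), scaled into 2^20 fixed point, multiplied by the spread (in ppm/1000) and adjust period, then divided across the SSC period. A standalone restatement of the same pipeline:

```c
/* Standalone restatement of pll_14nm_ssc_calc()'s step-size pipeline.
 * With the defaults above (ssc_freq = 31500, ref = 19.2 MHz), the
 * ssc_period input works out to CEIL(19200, 63) - 1 = 304. */
static u32 ssc_step_size(u64 vco, u32 ref_khz, u32 spread_ppm_per_1000,
			 u32 adj_period, u32 ssc_period)
{
	u64 step = div_u64(vco, ref_khz);	/* VCO per ref-kHz tick */
	u32 rem;

	step <<= 20;				/* 2^20 fixed point */
	step = div_u64(step, 1000);
	step *= spread_ppm_per_1000;		/* spread in ppm / 1000 */
	step = div_u64(step, 1000);
	step *= adj_period + 1;
	step = div_u64_rem(step, ssc_period + 1, &rem);
	if (rem)
		step++;			/* round up the per-period step */

	return step & 0xffff;		/* registers keep 16 bits */
}
```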
| 306 | |||
| 307 | static void pll_14nm_dec_frac_calc(struct dsi_pll_14nm *pll) | ||
| 308 | { | ||
| 309 | struct dsi_pll_input *pin = &pll->in; | ||
| 310 | struct dsi_pll_output *pout = &pll->out; | ||
| 311 | u64 multiplier = BIT(20); | ||
| 312 | u64 dec_start_multiple, dec_start, pll_comp_val; | ||
| 313 | u32 duration, div_frac_start; | ||
| 314 | u64 vco_clk_rate = pll->vco_current_rate; | ||
| 315 | u64 fref = pll->vco_ref_clk_rate; | ||
| 316 | |||
| 317 | DBG("vco_clk_rate=%lld ref_clk_rate=%lld", vco_clk_rate, fref); | ||
| 318 | |||
| 319 | dec_start_multiple = div_u64(vco_clk_rate * multiplier, fref); | ||
| 320 | div_u64_rem(dec_start_multiple, multiplier, &div_frac_start); | ||
| 321 | |||
| 322 | dec_start = div_u64(dec_start_multiple, multiplier); | ||
| 323 | |||
| 324 | pout->dec_start = (u32)dec_start; | ||
| 325 | pout->div_frac_start = div_frac_start; | ||
| 326 | |||
| 327 | if (pin->plllock_cnt == 0) | ||
| 328 | duration = 1024; | ||
| 329 | else if (pin->plllock_cnt == 1) | ||
| 330 | duration = 256; | ||
| 331 | else if (pin->plllock_cnt == 2) | ||
| 332 | duration = 128; | ||
| 333 | else | ||
| 334 | duration = 32; | ||
| 335 | |||
| 336 | pll_comp_val = duration * dec_start_multiple; | ||
| 337 | pll_comp_val = div_u64(pll_comp_val, multiplier); | ||
| 338 | do_div(pll_comp_val, 10); | ||
| 339 | |||
| 340 | pout->plllock_cmp = (u32)pll_comp_val; | ||
| 341 | |||
| 342 | pout->pll_txclk_en = 1; | ||
| 343 | pout->cmn_ldo_cntrl = 0x3c; | ||
| 344 | } | ||
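A worked example of the decimal/fractional split: the PLL multiplies the reference by `dec_start + div_frac_start / 2^20`. For a 1785 MHz VCO from the 19.2 MHz reference, 1785000000 / 19200000 = 92.96875, so `dec_start` = 92 and `div_frac_start` = 0.96875 * 2^20 = 1015808. As a sketch:

```c
/* Sketch of the fractional-N split: vco = fref * (dec + frac / 2^20).
 * Example: vco = 1785 MHz, fref = 19.2 MHz -> dec = 92, frac = 1015808. */
static void frac_n_split(u64 vco, u32 fref, u32 *dec, u32 *frac)
{
	u64 multiple = div_u64(vco << 20, fref);

	*dec = multiple >> 20;			/* integer part of vco / fref */
	*frac = multiple & (BIT(20) - 1);	/* 20-bit fractional part */
}
```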
| 345 | |||
| 346 | static u32 pll_14nm_kvco_slop(u32 vrate) | ||
| 347 | { | ||
| 348 | u32 slop = 0; | ||
| 349 | |||
| 350 | if (vrate > VCO_MIN_RATE && vrate <= 1800000000UL) | ||
| 351 | slop = 600; | ||
| 352 | else if (vrate > 1800000000UL && vrate < 2300000000UL) | ||
| 353 | slop = 400; | ||
| 354 | else if (vrate > 2300000000UL && vrate < VCO_MAX_RATE) | ||
| 355 | slop = 280; | ||
| 356 | |||
| 357 | return slop; | ||
| 358 | } | ||
| 359 | |||
| 360 | static void pll_14nm_calc_vco_count(struct dsi_pll_14nm *pll) | ||
| 361 | { | ||
| 362 | struct dsi_pll_input *pin = &pll->in; | ||
| 363 | struct dsi_pll_output *pout = &pll->out; | ||
| 364 | u64 vco_clk_rate = pll->vco_current_rate; | ||
| 365 | u64 fref = pll->vco_ref_clk_rate; | ||
| 366 | u64 data; | ||
| 367 | u32 cnt; | ||
| 368 | |||
| 369 | data = fref * pin->vco_measure_time; | ||
| 370 | do_div(data, 1000000); | ||
| 371 | data &= 0x03ff; /* 10 bits */ | ||
| 372 | data -= 2; | ||
| 373 | pout->pll_vco_div_ref = data; | ||
| 374 | |||
| 375 | data = div_u64(vco_clk_rate, 1000000); /* unit is MHz */ | ||
| 376 | data *= pin->vco_measure_time; | ||
| 377 | do_div(data, 10); | ||
| 378 | pout->pll_vco_count = data; | ||
| 379 | |||
| 380 | data = fref * pin->kvco_measure_time; | ||
| 381 | do_div(data, 1000000); | ||
| 382 | data &= 0x03ff; /* 10 bits */ | ||
| 383 | data -= 1; | ||
| 384 | pout->pll_kvco_div_ref = data; | ||
| 385 | |||
| 386 | cnt = pll_14nm_kvco_slop(vco_clk_rate); | ||
| 387 | cnt *= 2; | ||
| 388 | cnt /= 100; | ||
| 389 | cnt *= pin->kvco_measure_time; | ||
| 390 | pout->pll_kvco_count = cnt; | ||
| 391 | |||
| 392 | pout->pll_misc1 = 16; | ||
| 393 | pout->pll_resetsm_cntrl = 48; | ||
| 394 | pout->pll_resetsm_cntrl2 = pin->bandgap_timer << 3; | ||
| 395 | pout->pll_resetsm_cntrl5 = pin->pll_wakeup_timer; | ||
| 396 | pout->pll_kvco_code = 0; | ||
| 397 | } | ||
| 398 | |||
| 399 | static void pll_db_commit_ssc(struct dsi_pll_14nm *pll) | ||
| 400 | { | ||
| 401 | void __iomem *base = pll->mmio; | ||
| 402 | struct dsi_pll_input *pin = &pll->in; | ||
| 403 | struct dsi_pll_output *pout = &pll->out; | ||
| 404 | u8 data; | ||
| 405 | |||
| 406 | data = pin->ssc_adj_period; | ||
| 407 | data &= 0x0ff; | ||
| 408 | pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1, data); | ||
| 409 | data = (pin->ssc_adj_period >> 8); | ||
| 410 | data &= 0x03; | ||
| 411 | pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2, data); | ||
| 412 | |||
| 413 | data = pout->ssc_period; | ||
| 414 | data &= 0x0ff; | ||
| 415 | pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER1, data); | ||
| 416 | data = (pout->ssc_period >> 8); | ||
| 417 | data &= 0x0ff; | ||
| 418 | pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER2, data); | ||
| 419 | |||
| 420 | data = pout->ssc_step_size; | ||
| 421 | data &= 0x0ff; | ||
| 422 | pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1, data); | ||
| 423 | data = (pout->ssc_step_size >> 8); | ||
| 424 | data &= 0x0ff; | ||
| 425 | pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2, data); | ||
| 426 | |||
| 427 | data = (pin->ssc_center & 0x01); | ||
| 428 | data <<= 1; | ||
| 429 | data |= 0x01; /* enable */ | ||
| 430 | pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER, data); | ||
| 431 | |||
| 432 | wmb(); /* make sure register committed */ | ||
| 433 | } | ||
| 434 | |||
| 435 | static void pll_db_commit_common(struct dsi_pll_14nm *pll, | ||
| 436 | struct dsi_pll_input *pin, | ||
| 437 | struct dsi_pll_output *pout) | ||
| 438 | { | ||
| 439 | void __iomem *base = pll->mmio; | ||
| 440 | u8 data; | ||
| 441 | |||
| 442 | /* configure the non-frequency-dependent PLL registers */ | ||
| 443 | data = 0; | ||
| 444 | pll_write(base + REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET, data); | ||
| 445 | |||
| 446 | data = pout->pll_txclk_en; | ||
| 447 | pll_write(base + REG_DSI_14nm_PHY_PLL_TXCLK_EN, data); | ||
| 448 | |||
| 449 | data = pout->pll_resetsm_cntrl; | ||
| 450 | pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL, data); | ||
| 451 | data = pout->pll_resetsm_cntrl2; | ||
| 452 | pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2, data); | ||
| 453 | data = pout->pll_resetsm_cntrl5; | ||
| 454 | pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5, data); | ||
| 455 | |||
| 456 | data = pout->pll_vco_div_ref & 0xff; | ||
| 457 | pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1, data); | ||
| 458 | data = (pout->pll_vco_div_ref >> 8) & 0x3; | ||
| 459 | pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2, data); | ||
| 460 | |||
| 461 | data = pout->pll_kvco_div_ref & 0xff; | ||
| 462 | pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1, data); | ||
| 463 | data = (pout->pll_kvco_div_ref >> 8) & 0x3; | ||
| 464 | pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2, data); | ||
| 465 | |||
| 466 | data = pout->pll_misc1; | ||
| 467 | pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_MISC1, data); | ||
| 468 | |||
| 469 | data = pin->pll_ie_trim; | ||
| 470 | pll_write(base + REG_DSI_14nm_PHY_PLL_IE_TRIM, data); | ||
| 471 | |||
| 472 | data = pin->pll_ip_trim; | ||
| 473 | pll_write(base + REG_DSI_14nm_PHY_PLL_IP_TRIM, data); | ||
| 474 | |||
| 475 | data = pin->pll_cpmset_cur << 3 | pin->pll_cpcset_cur; | ||
| 476 | pll_write(base + REG_DSI_14nm_PHY_PLL_CP_SET_CUR, data); | ||
| 477 | |||
| 478 | data = pin->pll_icpcset_p << 3 | pin->pll_icpcset_m; | ||
| 479 | pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPCSET, data); | ||
| 480 | |||
| 481 | data = pin->pll_icpmset_p << 3 | pin->pll_icpmset_m; | ||
| 482 | pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPMSET, data); | ||
| 483 | |||
| 484 | data = pin->pll_icpmset << 3 | pin->pll_icpcset; | ||
| 485 | pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICP_SET, data); | ||
| 486 | |||
| 487 | data = pin->pll_lpf_cap2 << 4 | pin->pll_lpf_cap1; | ||
| 488 | pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF1, data); | ||
| 489 | |||
| 490 | data = pin->pll_iptat_trim; | ||
| 491 | pll_write(base + REG_DSI_14nm_PHY_PLL_IPTAT_TRIM, data); | ||
| 492 | |||
| 493 | data = pin->pll_c3ctrl | pin->pll_r3ctrl << 4; | ||
| 494 | pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_CRCTRL, data); | ||
| 495 | } | ||
| 496 | |||
| 497 | static void pll_14nm_software_reset(struct dsi_pll_14nm *pll_14nm) | ||
| 498 | { | ||
| 499 | void __iomem *cmn_base = pll_14nm->phy_cmn_mmio; | ||
| 500 | |||
| 501 | /* de-assert PLL start and apply PLL SW reset */ | ||
| 502 | |||
| 503 | /* stop pll */ | ||
| 504 | pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0); | ||
| 505 | |||
| 506 | /* pll sw reset */ | ||
| 507 | pll_write_udelay(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x20, 10); | ||
| 508 | wmb(); /* make sure register committed */ | ||
| 509 | |||
| 510 | pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0); | ||
| 511 | wmb(); /* make sure register committed */ | ||
| 512 | } | ||
| 513 | |||
| 514 | static void pll_db_commit_14nm(struct dsi_pll_14nm *pll, | ||
| 515 | struct dsi_pll_input *pin, | ||
| 516 | struct dsi_pll_output *pout) | ||
| 517 | { | ||
| 518 | void __iomem *base = pll->mmio; | ||
| 519 | void __iomem *cmn_base = pll->phy_cmn_mmio; | ||
| 520 | u8 data; | ||
| 521 | |||
| 522 | DBG("DSI%d PLL", pll->id); | ||
| 523 | |||
| 524 | data = pout->cmn_ldo_cntrl; | ||
| 525 | pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data); | ||
| 526 | |||
| 527 | pll_db_commit_common(pll, pin, pout); | ||
| 528 | |||
| 529 | pll_14nm_software_reset(pll); | ||
| 530 | |||
| 531 | data = pin->dsiclk_sel; /* set dsiclk_sel = 1 */ | ||
| 532 | pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG1, data); | ||
| 533 | |||
| 534 | data = 0xff; /* data, clk, pll normal operation */ | ||
| 535 | pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_0, data); | ||
| 536 | |||
| 537 | /* configure the frequency dependent pll registers */ | ||
| 538 | data = pout->dec_start; | ||
| 539 | pll_write(base + REG_DSI_14nm_PHY_PLL_DEC_START, data); | ||
| 540 | |||
| 541 | data = pout->div_frac_start & 0xff; | ||
| 542 | pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1, data); | ||
| 543 | data = (pout->div_frac_start >> 8) & 0xff; | ||
| 544 | pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2, data); | ||
| 545 | data = (pout->div_frac_start >> 16) & 0xf; | ||
| 546 | pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3, data); | ||
| 547 | |||
| 548 | data = pout->plllock_cmp & 0xff; | ||
| 549 | pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1, data); | ||
| 550 | |||
| 551 | data = (pout->plllock_cmp >> 8) & 0xff; | ||
| 552 | pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2, data); | ||
| 553 | |||
| 554 | data = (pout->plllock_cmp >> 16) & 0x3; | ||
| 555 | pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3, data); | ||
| 556 | |||
| 557 | data = pin->plllock_cnt << 1 | pin->plllock_rng << 3; | ||
| 558 | pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN, data); | ||
| 559 | |||
| 560 | data = pout->pll_vco_count & 0xff; | ||
| 561 | pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT1, data); | ||
| 562 | data = (pout->pll_vco_count >> 8) & 0xff; | ||
| 563 | pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT2, data); | ||
| 564 | |||
| 565 | data = pout->pll_kvco_count & 0xff; | ||
| 566 | pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT1, data); | ||
| 567 | data = (pout->pll_kvco_count >> 8) & 0x3; | ||
| 568 | pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT2, data); | ||
| 569 | |||
| 570 | data = (pout->pll_postdiv - 1) << 4 | pin->pll_lpf_res1; | ||
| 571 | pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV, data); | ||
| 572 | |||
| 573 | if (pin->ssc_en) | ||
| 574 | pll_db_commit_ssc(pll); | ||
| 575 | |||
| 576 | wmb(); /* make sure register committed */ | ||
| 577 | } | ||
| 578 | |||
| 579 | /* | ||
| 580 | * VCO clock Callbacks | ||
| 581 | */ | ||
| 582 | static int dsi_pll_14nm_vco_set_rate(struct clk_hw *hw, unsigned long rate, | ||
| 583 | unsigned long parent_rate) | ||
| 584 | { | ||
| 585 | struct msm_dsi_pll *pll = hw_clk_to_pll(hw); | ||
| 586 | struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); | ||
| 587 | struct dsi_pll_input *pin = &pll_14nm->in; | ||
| 588 | struct dsi_pll_output *pout = &pll_14nm->out; | ||
| 589 | |||
| 590 | DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_14nm->id, rate, | ||
| 591 | parent_rate); | ||
| 592 | |||
| 593 | pll_14nm->vco_current_rate = rate; | ||
| 594 | pll_14nm->vco_ref_clk_rate = VCO_REF_CLK_RATE; | ||
| 595 | |||
| 596 | dsi_pll_14nm_input_init(pll_14nm); | ||
| 597 | |||
| 598 | /* | ||
| 599 | * This configures the post divider internal to the VCO. It's | ||
| 600 | * fixed to divide by 1 for now. | ||
| 601 | * | ||
| 602 | * tx_band = pll_postdiv. | ||
| 603 | * 0: divided by 1 | ||
| 604 | * 1: divided by 2 | ||
| 605 | * 2: divided by 4 | ||
| 606 | * 3: divided by 8 | ||
| 607 | */ | ||
| 608 | pout->pll_postdiv = DSI_PLL_DEFAULT_VCO_POSTDIV; | ||
| 609 | |||
| 610 | pll_14nm_dec_frac_calc(pll_14nm); | ||
| 611 | |||
| 612 | if (pin->ssc_en) | ||
| 613 | pll_14nm_ssc_calc(pll_14nm); | ||
| 614 | |||
| 615 | pll_14nm_calc_vco_count(pll_14nm); | ||
| 616 | |||
| 617 | /* commit the slave DSI PLL registers if we're master. Note that we | ||
| 618 | * don't lock the slave PLL. We just ensure that the PLL/PHY registers | ||
| 619 | * of the master and slave are identical | ||
| 620 | */ | ||
| 621 | if (pll_14nm->uc == MSM_DSI_PHY_MASTER) { | ||
| 622 | struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave; | ||
| 623 | |||
| 624 | pll_db_commit_14nm(pll_14nm_slave, pin, pout); | ||
| 625 | } | ||
| 626 | |||
| 627 | pll_db_commit_14nm(pll_14nm, pin, pout); | ||
| 628 | |||
| 629 | return 0; | ||
| 630 | } | ||
| 631 | |||
| 632 | static unsigned long dsi_pll_14nm_vco_recalc_rate(struct clk_hw *hw, | ||
| 633 | unsigned long parent_rate) | ||
| 634 | { | ||
| 635 | struct msm_dsi_pll *pll = hw_clk_to_pll(hw); | ||
| 636 | struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); | ||
| 637 | void __iomem *base = pll_14nm->mmio; | ||
| 638 | u64 vco_rate, multiplier = BIT(20); | ||
| 639 | u32 div_frac_start; | ||
| 640 | u32 dec_start; | ||
| 641 | u64 ref_clk = parent_rate; | ||
| 642 | |||
| 643 | dec_start = pll_read(base + REG_DSI_14nm_PHY_PLL_DEC_START); | ||
| 644 | dec_start &= 0x0ff; | ||
| 645 | |||
| 646 | DBG("dec_start = %x", dec_start); | ||
| 647 | |||
| 648 | div_frac_start = (pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3) | ||
| 649 | & 0xf) << 16; | ||
| 650 | div_frac_start |= (pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2) | ||
| 651 | & 0xff) << 8; | ||
| 652 | div_frac_start |= pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1) | ||
| 653 | & 0xff; | ||
| 654 | |||
| 655 | DBG("div_frac_start = %x", div_frac_start); | ||
| 656 | |||
| 657 | vco_rate = ref_clk * dec_start; | ||
| 658 | |||
| 659 | vco_rate += ((ref_clk * div_frac_start) / multiplier); | ||
| 660 | |||
| 661 | /* | ||
| 662 | * Recalculating the rate from dec_start and div_frac_start doesn't land | ||
| 663 | * exactly on the rate we originally set. Convert the freq to kHz, round | ||
| 664 | * it up and convert it back to Hz. | ||
| 665 | */ | ||
| 666 | vco_rate = DIV_ROUND_UP_ULL(vco_rate, 1000) * 1000; | ||
| 667 | |||
| 668 | DBG("returning vco rate = %lu", (unsigned long)vco_rate); | ||
| 669 | |||
| 670 | return (unsigned long)vco_rate; | ||
| 671 | } | ||
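The rounding at the end is just a kHz quantization, so the recalculated rate compares equal to the rate that was originally set. As a one-liner:

```c
/* Round a recalculated rate up to a whole kHz (mirrors the cleanup in
 * dsi_pll_14nm_vco_recalc_rate() above). */
static unsigned long vco_rate_round_khz(u64 rate_hz)
{
	return DIV_ROUND_UP_ULL(rate_hz, 1000) * 1000;
}
```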
| 672 | |||
| 673 | static const struct clk_ops clk_ops_dsi_pll_14nm_vco = { | ||
| 674 | .round_rate = msm_dsi_pll_helper_clk_round_rate, | ||
| 675 | .set_rate = dsi_pll_14nm_vco_set_rate, | ||
| 676 | .recalc_rate = dsi_pll_14nm_vco_recalc_rate, | ||
| 677 | .prepare = msm_dsi_pll_helper_clk_prepare, | ||
| 678 | .unprepare = msm_dsi_pll_helper_clk_unprepare, | ||
| 679 | }; | ||
| 680 | |||
| 681 | /* | ||
| 682 | * N1 and N2 post-divider clock callbacks | ||
| 683 | */ | ||
| 684 | #define div_mask(width) ((1 << (width)) - 1) | ||
| 685 | static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw, | ||
| 686 | unsigned long parent_rate) | ||
| 687 | { | ||
| 688 | struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw); | ||
| 689 | struct dsi_pll_14nm *pll_14nm = postdiv->pll; | ||
| 690 | void __iomem *base = pll_14nm->phy_cmn_mmio; | ||
| 691 | u8 shift = postdiv->shift; | ||
| 692 | u8 width = postdiv->width; | ||
| 693 | u32 val; | ||
| 694 | |||
| 695 | DBG("DSI%d PLL parent rate=%lu", pll_14nm->id, parent_rate); | ||
| 696 | |||
| 697 | val = pll_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0) >> shift; | ||
| 698 | val &= div_mask(width); | ||
| 699 | |||
| 700 | return divider_recalc_rate(hw, parent_rate, val, NULL, | ||
| 701 | postdiv->flags); | ||
| 702 | } | ||
| 703 | |||
| 704 | static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw, | ||
| 705 | unsigned long rate, | ||
| 706 | unsigned long *prate) | ||
| 707 | { | ||
| 708 | struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw); | ||
| 709 | struct dsi_pll_14nm *pll_14nm = postdiv->pll; | ||
| 710 | |||
| 711 | DBG("DSI%d PLL parent rate=%lu", pll_14nm->id, rate); | ||
| 712 | |||
| 713 | return divider_round_rate(hw, rate, prate, NULL, | ||
| 714 | postdiv->width, | ||
| 715 | postdiv->flags); | ||
| 716 | } | ||
| 717 | |||
| 718 | static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate, | ||
| 719 | unsigned long parent_rate) | ||
| 720 | { | ||
| 721 | struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw); | ||
| 722 | struct dsi_pll_14nm *pll_14nm = postdiv->pll; | ||
| 723 | void __iomem *base = pll_14nm->phy_cmn_mmio; | ||
| 724 | spinlock_t *lock = &pll_14nm->postdiv_lock; | ||
| 725 | u8 shift = postdiv->shift; | ||
| 726 | u8 width = postdiv->width; | ||
| 727 | unsigned int value; | ||
| 728 | unsigned long flags = 0; | ||
| 729 | u32 val; | ||
| 730 | |||
| 731 | DBG("DSI%d PLL parent rate=%lu parent rate %lu", pll_14nm->id, rate, | ||
| 732 | parent_rate); | ||
| 733 | |||
| 734 | value = divider_get_val(rate, parent_rate, NULL, postdiv->width, | ||
| 735 | postdiv->flags); | ||
| 736 | |||
| 737 | spin_lock_irqsave(lock, flags); | ||
| 738 | |||
| 739 | val = pll_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0); | ||
| 740 | val &= ~(div_mask(width) << shift); | ||
| 741 | |||
| 742 | val |= value << shift; | ||
| 743 | pll_write(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val); | ||
| 744 | |||
| 745 | /* If we're master in dual DSI mode, then the slave PLL's post-dividers | ||
| 746 | * follow the master's post-dividers. | ||
| 747 | */ | ||
| 748 | if (pll_14nm->uc == MSM_DSI_PHY_MASTER) { | ||
| 749 | struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave; | ||
| 750 | void __iomem *slave_base = pll_14nm_slave->phy_cmn_mmio; | ||
| 751 | |||
| 752 | pll_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val); | ||
| 753 | } | ||
| 754 | |||
| 755 | spin_unlock_irqrestore(lock, flags); | ||
| 756 | |||
| 757 | return 0; | ||
| 758 | } | ||
| 759 | |||
| 760 | static const struct clk_ops clk_ops_dsi_pll_14nm_postdiv = { | ||
| 761 | .recalc_rate = dsi_pll_14nm_postdiv_recalc_rate, | ||
| 762 | .round_rate = dsi_pll_14nm_postdiv_round_rate, | ||
| 763 | .set_rate = dsi_pll_14nm_postdiv_set_rate, | ||
| 764 | }; | ||
| 765 | |||
| 766 | /* | ||
| 767 | * PLL Callbacks | ||
| 768 | */ | ||
| 769 | |||
| 770 | static int dsi_pll_14nm_enable_seq(struct msm_dsi_pll *pll) | ||
| 771 | { | ||
| 772 | struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); | ||
| 773 | void __iomem *base = pll_14nm->mmio; | ||
| 774 | void __iomem *cmn_base = pll_14nm->phy_cmn_mmio; | ||
| 775 | bool locked; | ||
| 776 | |||
| 777 | DBG(""); | ||
| 778 | |||
| 779 | pll_write(base + REG_DSI_14nm_PHY_PLL_VREF_CFG1, 0x10); | ||
| 780 | pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 1); | ||
| 781 | |||
| 782 | locked = pll_14nm_poll_for_ready(pll_14nm, POLL_MAX_READS, | ||
| 783 | POLL_TIMEOUT_US); | ||
| 784 | |||
| 785 | if (unlikely(!locked)) | ||
| 786 | dev_err(&pll_14nm->pdev->dev, "DSI PLL lock failed\n"); | ||
| 787 | else | ||
| 788 | DBG("DSI PLL lock success"); | ||
| 789 | |||
| 790 | return locked ? 0 : -EINVAL; | ||
| 791 | } | ||
| 792 | |||
| 793 | static void dsi_pll_14nm_disable_seq(struct msm_dsi_pll *pll) | ||
| 794 | { | ||
| 795 | struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); | ||
| 796 | void __iomem *cmn_base = pll_14nm->phy_cmn_mmio; | ||
| 797 | |||
| 798 | DBG(""); | ||
| 799 | |||
| 800 | pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0); | ||
| 801 | } | ||
| 802 | |||
| 803 | static void dsi_pll_14nm_save_state(struct msm_dsi_pll *pll) | ||
| 804 | { | ||
| 805 | struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); | ||
| 806 | struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state; | ||
| 807 | void __iomem *cmn_base = pll_14nm->phy_cmn_mmio; | ||
| 808 | u32 data; | ||
| 809 | |||
| 810 | data = pll_read(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0); | ||
| 811 | |||
| 812 | cached_state->n1postdiv = data & 0xf; | ||
| 813 | cached_state->n2postdiv = (data >> 4) & 0xf; | ||
| 814 | |||
| 815 | DBG("DSI%d PLL save state %x %x", pll_14nm->id, | ||
| 816 | cached_state->n1postdiv, cached_state->n2postdiv); | ||
| 817 | |||
| 818 | cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw); | ||
| 819 | } | ||
| 820 | |||
| 821 | static int dsi_pll_14nm_restore_state(struct msm_dsi_pll *pll) | ||
| 822 | { | ||
| 823 | struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); | ||
| 824 | struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state; | ||
| 825 | void __iomem *cmn_base = pll_14nm->phy_cmn_mmio; | ||
| 826 | u32 data; | ||
| 827 | int ret; | ||
| 828 | |||
| 829 | ret = dsi_pll_14nm_vco_set_rate(&pll->clk_hw, | ||
| 830 | cached_state->vco_rate, 0); | ||
| 831 | if (ret) { | ||
| 832 | dev_err(&pll_14nm->pdev->dev, | ||
| 833 | "restore vco rate failed. ret=%d\n", ret); | ||
| 834 | return ret; | ||
| 835 | } | ||
| 836 | |||
| 837 | data = cached_state->n1postdiv | (cached_state->n2postdiv << 4); | ||
| 838 | |||
| 839 | DBG("DSI%d PLL restore state %x %x", pll_14nm->id, | ||
| 840 | cached_state->n1postdiv, cached_state->n2postdiv); | ||
| 841 | |||
| 842 | pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data); | ||
| 843 | |||
| 844 | /* also restore post-dividers for slave DSI PLL */ | ||
| 845 | if (pll_14nm->uc == MSM_DSI_PHY_MASTER) { | ||
| 846 | struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave; | ||
| 847 | void __iomem *slave_base = pll_14nm_slave->phy_cmn_mmio; | ||
| 848 | |||
| 849 | pll_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data); | ||
| 850 | } | ||
| 851 | |||
| 852 | return 0; | ||
| 853 | } | ||
| 854 | |||
| 855 | static int dsi_pll_14nm_set_usecase(struct msm_dsi_pll *pll, | ||
| 856 | enum msm_dsi_phy_usecase uc) | ||
| 857 | { | ||
| 858 | struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); | ||
| 859 | void __iomem *base = pll_14nm->mmio; | ||
| 860 | u32 clkbuflr_en, bandgap = 0; | ||
| 861 | |||
| 862 | switch (uc) { | ||
| 863 | case MSM_DSI_PHY_STANDALONE: | ||
| 864 | clkbuflr_en = 0x1; | ||
| 865 | break; | ||
| 866 | case MSM_DSI_PHY_MASTER: | ||
| 867 | clkbuflr_en = 0x3; | ||
| 868 | pll_14nm->slave = pll_14nm_list[(pll_14nm->id + 1) % DSI_MAX]; | ||
| 869 | break; | ||
| 870 | case MSM_DSI_PHY_SLAVE: | ||
| 871 | clkbuflr_en = 0x0; | ||
| 872 | bandgap = 0x3; | ||
| 873 | break; | ||
| 874 | default: | ||
| 875 | return -EINVAL; | ||
| 876 | } | ||
| 877 | |||
| 878 | pll_write(base + REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN, clkbuflr_en); | ||
| 879 | if (bandgap) | ||
| 880 | pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_BANDGAP, bandgap); | ||
| 881 | |||
| 882 | pll_14nm->uc = uc; | ||
| 883 | |||
| 884 | return 0; | ||
| 885 | } | ||
| 886 | |||
| 887 | static int dsi_pll_14nm_get_provider(struct msm_dsi_pll *pll, | ||
| 888 | struct clk **byte_clk_provider, | ||
| 889 | struct clk **pixel_clk_provider) | ||
| 890 | { | ||
| 891 | struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); | ||
| 892 | struct clk_hw_onecell_data *hw_data = pll_14nm->hw_data; | ||
| 893 | |||
| 894 | if (byte_clk_provider) | ||
| 895 | *byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk; | ||
| 896 | if (pixel_clk_provider) | ||
| 897 | *pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk; | ||
| 898 | |||
| 899 | return 0; | ||
| 900 | } | ||
| 901 | |||
| 902 | static void dsi_pll_14nm_destroy(struct msm_dsi_pll *pll) | ||
| 903 | { | ||
| 904 | struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); | ||
| 905 | struct platform_device *pdev = pll_14nm->pdev; | ||
| 906 | int num_hws = pll_14nm->num_hws; | ||
| 907 | |||
| 908 | of_clk_del_provider(pdev->dev.of_node); | ||
| 909 | |||
| 910 | while (num_hws--) | ||
| 911 | clk_hw_unregister(pll_14nm->hws[num_hws]); | ||
| 912 | } | ||
| 913 | |||
| 914 | static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm, | ||
| 915 | const char *name, | ||
| 916 | const char *parent_name, | ||
| 917 | unsigned long flags, | ||
| 918 | u8 shift) | ||
| 919 | { | ||
| 920 | struct dsi_pll_14nm_postdiv *pll_postdiv; | ||
| 921 | struct device *dev = &pll_14nm->pdev->dev; | ||
| 922 | struct clk_init_data postdiv_init = { | ||
| 923 | .parent_names = (const char *[]) { parent_name }, | ||
| 924 | .num_parents = 1, | ||
| 925 | .name = name, | ||
| 926 | .flags = flags, | ||
| 927 | .ops = &clk_ops_dsi_pll_14nm_postdiv, | ||
| 928 | }; | ||
| 929 | int ret; | ||
| 930 | |||
| 931 | pll_postdiv = devm_kzalloc(dev, sizeof(*pll_postdiv), GFP_KERNEL); | ||
| 932 | if (!pll_postdiv) | ||
| 933 | return ERR_PTR(-ENOMEM); | ||
| 934 | |||
| 935 | pll_postdiv->pll = pll_14nm; | ||
| 936 | pll_postdiv->shift = shift; | ||
| 937 | /* both N1 and N2 postdividers are 4 bits wide */ | ||
| 938 | pll_postdiv->width = 4; | ||
| 939 | /* range of each divider is from 1 to 15 */ | ||
| 940 | pll_postdiv->flags = CLK_DIVIDER_ONE_BASED; | ||
| 941 | pll_postdiv->hw.init = &postdiv_init; | ||
| 942 | |||
| 943 | ret = clk_hw_register(dev, &pll_postdiv->hw); | ||
| 944 | if (ret) | ||
| 945 | return ERR_PTR(ret); | ||
| 946 | |||
| 947 | return &pll_postdiv->hw; | ||
| 948 | } | ||
| 949 | |||
| 950 | static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm) | ||
| 951 | { | ||
| 952 | char clk_name[32], parent[32], vco_name[32]; | ||
| 953 | struct clk_init_data vco_init = { | ||
| 954 | .parent_names = (const char *[]){ "xo" }, | ||
| 955 | .num_parents = 1, | ||
| 956 | .name = vco_name, | ||
| 957 | .flags = CLK_IGNORE_UNUSED, | ||
| 958 | .ops = &clk_ops_dsi_pll_14nm_vco, | ||
| 959 | }; | ||
| 960 | struct device *dev = &pll_14nm->pdev->dev; | ||
| 961 | struct clk_hw **hws = pll_14nm->hws; | ||
| 962 | struct clk_hw_onecell_data *hw_data; | ||
| 963 | struct clk_hw *hw; | ||
| 964 | int num = 0; | ||
| 965 | int ret; | ||
| 966 | |||
| 967 | DBG("DSI%d", pll_14nm->id); | ||
| 968 | |||
| 969 | hw_data = devm_kzalloc(dev, sizeof(*hw_data) + | ||
| 970 | NUM_PROVIDED_CLKS * sizeof(struct clk_hw *), | ||
| 971 | GFP_KERNEL); | ||
| 972 | if (!hw_data) | ||
| 973 | return -ENOMEM; | ||
| 974 | |||
| 975 | snprintf(vco_name, 32, "dsi%dvco_clk", pll_14nm->id); | ||
| 976 | pll_14nm->base.clk_hw.init = &vco_init; | ||
| 977 | |||
| 978 | ret = clk_hw_register(dev, &pll_14nm->base.clk_hw); | ||
| 979 | if (ret) | ||
| 980 | return ret; | ||
| 981 | |||
| 982 | hws[num++] = &pll_14nm->base.clk_hw; | ||
| 983 | |||
| 984 | snprintf(clk_name, 32, "dsi%dn1_postdiv_clk", pll_14nm->id); | ||
| 985 | snprintf(parent, 32, "dsi%dvco_clk", pll_14nm->id); | ||
| 986 | |||
| 987 | /* N1 postdiv, bits 0-3 in REG_DSI_14nm_PHY_CMN_CLK_CFG0 */ | ||
| 988 | hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent, | ||
| 989 | CLK_SET_RATE_PARENT, 0); | ||
| 990 | if (IS_ERR(hw)) | ||
| 991 | return PTR_ERR(hw); | ||
| 992 | |||
| 993 | hws[num++] = hw; | ||
| 994 | |||
| 995 | snprintf(clk_name, 32, "dsi%dpllbyte", pll_14nm->id); | ||
| 996 | snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->id); | ||
| 997 | |||
| 998 | /* DSI Byte clock = VCO_CLK / N1 / 8 */ | ||
| 999 | hw = clk_hw_register_fixed_factor(dev, clk_name, parent, | ||
| 1000 | CLK_SET_RATE_PARENT, 1, 8); | ||
| 1001 | if (IS_ERR(hw)) | ||
| 1002 | return PTR_ERR(hw); | ||
| 1003 | |||
| 1004 | hws[num++] = hw; | ||
| 1005 | hw_data->hws[DSI_BYTE_PLL_CLK] = hw; | ||
| 1006 | |||
| 1007 | snprintf(clk_name, 32, "dsi%dn1_postdivby2_clk", pll_14nm->id); | ||
| 1008 | snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->id); | ||
| 1009 | |||
| 1010 | /* | ||
| 1011 | * Skip the mux for now, force DSICLK_SEL to 1, and add a /2 divider | ||
| 1012 | * on the way. Don't let it set the parent. | ||
| 1013 | */ | ||
| 1014 | hw = clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 2); | ||
| 1015 | if (IS_ERR(hw)) | ||
| 1016 | return PTR_ERR(hw); | ||
| 1017 | |||
| 1018 | hws[num++] = hw; | ||
| 1019 | |||
| 1020 | snprintf(clk_name, 32, "dsi%dpll", pll_14nm->id); | ||
| 1021 | snprintf(parent, 32, "dsi%dn1_postdivby2_clk", pll_14nm->id); | ||
| 1022 | |||
| 1023 | /* DSI pixel clock = VCO_CLK / N1 / 2 / N2 | ||
| 1024 | * This is the output of N2 post-divider, bits 4-7 in | ||
| 1025 | * REG_DSI_14nm_PHY_CMN_CLK_CFG0. Don't let it set parent. | ||
| 1026 | */ | ||
| 1027 | hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent, 0, 4); | ||
| 1028 | if (IS_ERR(hw)) | ||
| 1029 | return PTR_ERR(hw); | ||
| 1030 | |||
| 1031 | hws[num++] = hw; | ||
| 1032 | hw_data->hws[DSI_PIXEL_PLL_CLK] = hw; | ||
| 1033 | |||
| 1034 | pll_14nm->num_hws = num; | ||
| 1035 | |||
| 1036 | hw_data->num = NUM_PROVIDED_CLKS; | ||
| 1037 | pll_14nm->hw_data = hw_data; | ||
| 1038 | |||
| 1039 | ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get, | ||
| 1040 | pll_14nm->hw_data); | ||
| 1041 | if (ret) { | ||
| 1042 | dev_err(dev, "failed to register clk provider: %d\n", ret); | ||
| 1043 | return ret; | ||
| 1044 | } | ||
| 1045 | |||
| 1046 | return 0; | ||
| 1047 | } | ||
| 1048 | |||
| 1049 | struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id) | ||
| 1050 | { | ||
| 1051 | struct dsi_pll_14nm *pll_14nm; | ||
| 1052 | struct msm_dsi_pll *pll; | ||
| 1053 | int ret; | ||
| 1054 | |||
| 1055 | if (!pdev) | ||
| 1056 | return ERR_PTR(-ENODEV); | ||
| 1057 | |||
| 1058 | pll_14nm = devm_kzalloc(&pdev->dev, sizeof(*pll_14nm), GFP_KERNEL); | ||
| 1059 | if (!pll_14nm) | ||
| 1060 | return ERR_PTR(-ENOMEM); | ||
| 1061 | |||
| 1062 | DBG("PLL%d", id); | ||
| 1063 | |||
| 1064 | pll_14nm->pdev = pdev; | ||
| 1065 | pll_14nm->id = id; | ||
| 1066 | pll_14nm_list[id] = pll_14nm; | ||
| 1067 | |||
| 1068 | pll_14nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY"); | ||
| 1069 | if (IS_ERR_OR_NULL(pll_14nm->phy_cmn_mmio)) { | ||
| 1070 | dev_err(&pdev->dev, "failed to map CMN PHY base\n"); | ||
| 1071 | return ERR_PTR(-ENOMEM); | ||
| 1072 | } | ||
| 1073 | |||
| 1074 | pll_14nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL"); | ||
| 1075 | if (IS_ERR_OR_NULL(pll_14nm->mmio)) { | ||
| 1076 | dev_err(&pdev->dev, "failed to map PLL base\n"); | ||
| 1077 | return ERR_PTR(-ENOMEM); | ||
| 1078 | } | ||
| 1079 | |||
| 1080 | spin_lock_init(&pll_14nm->postdiv_lock); | ||
| 1081 | |||
| 1082 | pll = &pll_14nm->base; | ||
| 1083 | pll->min_rate = VCO_MIN_RATE; | ||
| 1084 | pll->max_rate = VCO_MAX_RATE; | ||
| 1085 | pll->get_provider = dsi_pll_14nm_get_provider; | ||
| 1086 | pll->destroy = dsi_pll_14nm_destroy; | ||
| 1087 | pll->disable_seq = dsi_pll_14nm_disable_seq; | ||
| 1088 | pll->save_state = dsi_pll_14nm_save_state; | ||
| 1089 | pll->restore_state = dsi_pll_14nm_restore_state; | ||
| 1090 | pll->set_usecase = dsi_pll_14nm_set_usecase; | ||
| 1091 | |||
| 1092 | pll_14nm->vco_delay = 1; | ||
| 1093 | |||
| 1094 | pll->en_seq_cnt = 1; | ||
| 1095 | pll->enable_seqs[0] = dsi_pll_14nm_enable_seq; | ||
| 1096 | |||
| 1097 | ret = pll_14nm_register(pll_14nm); | ||
| 1098 | if (ret) { | ||
| 1099 | dev_err(&pdev->dev, "failed to register PLL: %d\n", ret); | ||
| 1100 | return ERR_PTR(ret); | ||
| 1101 | } | ||
| 1102 | |||
| 1103 | return pll; | ||
| 1104 | } | ||
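
A minimal userspace sketch of the post-divider arithmetic the PLL code above programs: byte clock = VCO / N1 / 8 and pixel clock = VCO / N1 / 2 / N2, with N1 in bits 0-3 and N2 in bits 4-7 of CLK_CFG0 (layout taken from the save/restore hunks). pack_clk_cfg0() is a hypothetical helper for illustration, not a kernel API:

/*
 * Model of the 14nm DSI PLL clock tree registered above. The register
 * packing mirrors what dsi_pll_14nm_save_state()/restore_state() read
 * and write; the VCO rate is just an example value.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_clk_cfg0(uint32_t n1, uint32_t n2)
{
	/* CLK_DIVIDER_ONE_BASED: each 4-bit field holds the divider 1..15 */
	return (n1 & 0xf) | ((n2 & 0xf) << 4);
}

int main(void)
{
	unsigned long vco = 1386000000UL;	/* example VCO rate in Hz */
	uint32_t n1 = 2, n2 = 3;

	printf("CLK_CFG0  = 0x%02x\n", (unsigned)pack_clk_cfg0(n1, n2));
	printf("byte clk  = %lu Hz\n", vco / n1 / 8);
	printf("pixel clk = %lu Hz\n", vco / (n1 * 2 * n2));
	return 0;
}
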
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index b782efd4b95f..94ea963519b2 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | |||
| @@ -260,8 +260,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms, | |||
| 260 | struct drm_encoder *encoder; | 260 | struct drm_encoder *encoder; |
| 261 | struct drm_connector *connector; | 261 | struct drm_connector *connector; |
| 262 | struct device_node *panel_node; | 262 | struct device_node *panel_node; |
| 263 | struct drm_encoder *dsi_encs[MSM_DSI_ENCODER_NUM]; | 263 | int dsi_id; |
| 264 | int i, dsi_id; | ||
| 265 | int ret; | 264 | int ret; |
| 266 | 265 | ||
| 267 | switch (intf_type) { | 266 | switch (intf_type) { |
| @@ -322,22 +321,19 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms, | |||
| 322 | if (!priv->dsi[dsi_id]) | 321 | if (!priv->dsi[dsi_id]) |
| 323 | break; | 322 | break; |
| 324 | 323 | ||
| 325 | for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) { | 324 | encoder = mdp4_dsi_encoder_init(dev); |
| 326 | dsi_encs[i] = mdp4_dsi_encoder_init(dev); | 325 | if (IS_ERR(encoder)) { |
| 327 | if (IS_ERR(dsi_encs[i])) { | 326 | ret = PTR_ERR(encoder); |
| 328 | ret = PTR_ERR(dsi_encs[i]); | 327 | dev_err(dev->dev, |
| 329 | dev_err(dev->dev, | 328 | "failed to construct DSI encoder: %d\n", ret); |
| 330 | "failed to construct DSI encoder: %d\n", | 329 | return ret; |
| 331 | ret); | ||
| 332 | return ret; | ||
| 333 | } | ||
| 334 | |||
| 335 | /* TODO: Add DMA_S later? */ | ||
| 336 | dsi_encs[i]->possible_crtcs = 1 << DMA_P; | ||
| 337 | priv->encoders[priv->num_encoders++] = dsi_encs[i]; | ||
| 338 | } | 330 | } |
| 339 | 331 | ||
| 340 | ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, dsi_encs); | 332 | /* TODO: Add DMA_S later? */ |
| 333 | encoder->possible_crtcs = 1 << DMA_P; | ||
| 334 | priv->encoders[priv->num_encoders++] = encoder; | ||
| 335 | |||
| 336 | ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder); | ||
| 341 | if (ret) { | 337 | if (ret) { |
| 342 | dev_err(dev->dev, "failed to initialize DSI: %d\n", | 338 | dev_err(dev->dev, "failed to initialize DSI: %d\n", |
| 343 | ret); | 339 | ret); |
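
The hunk above leans on the kernel's ERR_PTR/IS_ERR/PTR_ERR idiom: a pointer return encodes a negative errno in the top page of the address space. A minimal userspace model of the idiom (these are simplified stand-ins, not the kernel's actual macros):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* error pointers live in the last 4095 bytes of the address space */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *encoder_init(int fail)
{
	static int dummy_encoder;
	return fail ? ERR_PTR(-ENOMEM) : (void *)&dummy_encoder;
}

int main(void)
{
	void *enc = encoder_init(1);

	if (IS_ERR(enc)) {
		printf("failed to construct DSI encoder: %ld\n", PTR_ERR(enc));
		return 1;
	}
	return 0;
}
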
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h index 27d5371acee0..e6dfc518d4db 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h | |||
| @@ -8,19 +8,11 @@ http://github.com/freedreno/envytools/ | |||
| 8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
| 9 | 9 | ||
| 10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
| 11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) | 11 | - /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-01-11 05:19:19) |
| 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) | 12 | - /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54) |
| 13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) | 13 | - /local/mnt/workspace/source_trees/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2016-01-07 08:45:55) |
| 14 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) | 14 | |
| 15 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08) | 15 | Copyright (C) 2013-2017 by the following authors: |
| 16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) | ||
| 17 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) | ||
| 18 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) | ||
| 19 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) | ||
| 20 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18) | ||
| 21 | - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) | ||
| 22 | |||
| 23 | Copyright (C) 2013-2016 by the following authors: | ||
| 24 | - Rob Clark <robdclark@gmail.com> (robclark) | 16 | - Rob Clark <robdclark@gmail.com> (robclark) |
| 25 | - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) | 17 | - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) |
| 26 | 18 | ||
| @@ -65,16 +57,19 @@ enum mdp5_intfnum { | |||
| 65 | }; | 57 | }; |
| 66 | 58 | ||
| 67 | enum mdp5_pipe { | 59 | enum mdp5_pipe { |
| 68 | SSPP_VIG0 = 0, | 60 | SSPP_NONE = 0, |
| 69 | SSPP_VIG1 = 1, | 61 | SSPP_VIG0 = 1, |
| 70 | SSPP_VIG2 = 2, | 62 | SSPP_VIG1 = 2, |
| 71 | SSPP_RGB0 = 3, | 63 | SSPP_VIG2 = 3, |
| 72 | SSPP_RGB1 = 4, | 64 | SSPP_RGB0 = 4, |
| 73 | SSPP_RGB2 = 5, | 65 | SSPP_RGB1 = 5, |
| 74 | SSPP_DMA0 = 6, | 66 | SSPP_RGB2 = 6, |
| 75 | SSPP_DMA1 = 7, | 67 | SSPP_DMA0 = 7, |
| 76 | SSPP_VIG3 = 8, | 68 | SSPP_DMA1 = 8, |
| 77 | SSPP_RGB3 = 9, | 69 | SSPP_VIG3 = 9, |
| 70 | SSPP_RGB3 = 10, | ||
| 71 | SSPP_CURSOR0 = 11, | ||
| 72 | SSPP_CURSOR1 = 12, | ||
| 78 | }; | 73 | }; |
| 79 | 74 | ||
| 80 | enum mdp5_ctl_mode { | 75 | enum mdp5_ctl_mode { |
| @@ -532,6 +527,7 @@ static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR1(enum mdp_mixer_stage_id va | |||
| 532 | static inline uint32_t __offset_PIPE(enum mdp5_pipe idx) | 527 | static inline uint32_t __offset_PIPE(enum mdp5_pipe idx) |
| 533 | { | 528 | { |
| 534 | switch (idx) { | 529 | switch (idx) { |
| 530 | case SSPP_NONE: return (INVALID_IDX(idx)); | ||
| 535 | case SSPP_VIG0: return (mdp5_cfg->pipe_vig.base[0]); | 531 | case SSPP_VIG0: return (mdp5_cfg->pipe_vig.base[0]); |
| 536 | case SSPP_VIG1: return (mdp5_cfg->pipe_vig.base[1]); | 532 | case SSPP_VIG1: return (mdp5_cfg->pipe_vig.base[1]); |
| 537 | case SSPP_VIG2: return (mdp5_cfg->pipe_vig.base[2]); | 533 | case SSPP_VIG2: return (mdp5_cfg->pipe_vig.base[2]); |
| @@ -542,6 +538,8 @@ static inline uint32_t __offset_PIPE(enum mdp5_pipe idx) | |||
| 542 | case SSPP_DMA1: return (mdp5_cfg->pipe_dma.base[1]); | 538 | case SSPP_DMA1: return (mdp5_cfg->pipe_dma.base[1]); |
| 543 | case SSPP_VIG3: return (mdp5_cfg->pipe_vig.base[3]); | 539 | case SSPP_VIG3: return (mdp5_cfg->pipe_vig.base[3]); |
| 544 | case SSPP_RGB3: return (mdp5_cfg->pipe_rgb.base[3]); | 540 | case SSPP_RGB3: return (mdp5_cfg->pipe_rgb.base[3]); |
| 541 | case SSPP_CURSOR0: return (mdp5_cfg->pipe_cursor.base[0]); | ||
| 542 | case SSPP_CURSOR1: return (mdp5_cfg->pipe_cursor.base[1]); | ||
| 545 | default: return INVALID_IDX(idx); | 543 | default: return INVALID_IDX(idx); |
| 546 | } | 544 | } |
| 547 | } | 545 | } |
| @@ -1073,6 +1071,10 @@ static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00000 | |||
| 1073 | #define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004 | 1071 | #define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004 |
| 1074 | #define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008 | 1072 | #define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008 |
| 1075 | #define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010 | 1073 | #define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010 |
| 1074 | #define MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA 0x00000020 | ||
| 1075 | #define MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA 0x00000040 | ||
| 1076 | #define MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA 0x00000080 | ||
| 1077 | #define MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT 0x80000000 | ||
| 1076 | 1078 | ||
| 1077 | static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00000004 + __offset_LM(i0); } | 1079 | static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00000004 + __offset_LM(i0); } |
| 1078 | #define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000 | 1080 | #define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000 |
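
A sketch of what the enum renumbering above buys: with SSPP_NONE = 0, a zero-initialized stage array means "no pipe", and the offset lookup can treat it as invalid without extra bookkeeping. Standalone model; the base offsets here are illustrative placeholders, not the real per-SoC values:

#include <stdint.h>
#include <stdio.h>

enum mdp5_pipe { SSPP_NONE, SSPP_VIG0, SSPP_RGB0, SSPP_CURSOR0 };

static uint32_t pipe_base(enum mdp5_pipe p)
{
	switch (p) {
	case SSPP_VIG0:    return 0x05000;	/* placeholder offsets */
	case SSPP_RGB0:    return 0x15000;
	case SSPP_CURSOR0: return 0x34000;
	case SSPP_NONE:
	default:           return 0;		/* invalid/empty pipe */
	}
}

int main(void)
{
	/* element 0 set explicitly, the rest zero-init to SSPP_NONE */
	enum mdp5_pipe stage[3] = { SSPP_VIG0 };

	for (int i = 0; i < 3; i++)
		printf("stage %d -> base 0x%05x\n", i,
		       (unsigned)pipe_base(stage[i]));
	return 0;
}
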
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c index 618b2ffed9b4..34ab553f6897 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | |||
| @@ -421,6 +421,16 @@ const struct mdp5_cfg_hw msm8x96_config = { | |||
| 421 | MDP_PIPE_CAP_SW_PIX_EXT | | 421 | MDP_PIPE_CAP_SW_PIX_EXT | |
| 422 | 0, | 422 | 0, |
| 423 | }, | 423 | }, |
| 424 | .pipe_cursor = { | ||
| 425 | .count = 2, | ||
| 426 | .base = { 0x34000, 0x36000 }, | ||
| 427 | .caps = MDP_PIPE_CAP_HFLIP | | ||
| 428 | MDP_PIPE_CAP_VFLIP | | ||
| 429 | MDP_PIPE_CAP_SW_PIX_EXT | | ||
| 430 | MDP_PIPE_CAP_CURSOR | | ||
| 431 | 0, | ||
| 432 | }, | ||
| 433 | |||
| 424 | .lm = { | 434 | .lm = { |
| 425 | .count = 6, | 435 | .count = 6, |
| 426 | .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 }, | 436 | .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 }, |
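
The msm8x96 config above describes the two new cursor SSPPs as a count/base/caps tuple, where caps is an OR of feature bits. A minimal standalone model; the bit positions of the flags are assumed for illustration (the bases 0x34000/0x36000 come from the hunk itself):

#include <stdint.h>
#include <stdio.h>

#define MDP_PIPE_CAP_HFLIP	(1u << 0)	/* assumed bit values */
#define MDP_PIPE_CAP_VFLIP	(1u << 1)
#define MDP_PIPE_CAP_SW_PIX_EXT	(1u << 2)
#define MDP_PIPE_CAP_CURSOR	(1u << 3)

struct pipe_block {
	unsigned int count;
	uint32_t base[2];
	uint32_t caps;
};

int main(void)
{
	const struct pipe_block pipe_cursor = {
		.count = 2,
		.base = { 0x34000, 0x36000 },
		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
			MDP_PIPE_CAP_SW_PIX_EXT | MDP_PIPE_CAP_CURSOR,
	};

	for (unsigned int i = 0; i < pipe_cursor.count; i++)
		printf("cursor%u @ 0x%05x caps=0x%x\n", i,
		       (unsigned)pipe_cursor.base[i],
		       (unsigned)pipe_cursor.caps);
	return 0;
}
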
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h index 050e1618c836..b1c7daaede86 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h | |||
| @@ -32,7 +32,7 @@ extern const struct mdp5_cfg_hw *mdp5_cfg; | |||
| 32 | typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS); | 32 | typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS); |
| 33 | 33 | ||
| 34 | #define MDP5_SUB_BLOCK_DEFINITION \ | 34 | #define MDP5_SUB_BLOCK_DEFINITION \ |
| 35 | int count; \ | 35 | unsigned int count; \ |
| 36 | uint32_t base[MAX_BASES] | 36 | uint32_t base[MAX_BASES] |
| 37 | 37 | ||
| 38 | struct mdp5_sub_block { | 38 | struct mdp5_sub_block { |
| @@ -85,6 +85,7 @@ struct mdp5_cfg_hw { | |||
| 85 | struct mdp5_pipe_block pipe_vig; | 85 | struct mdp5_pipe_block pipe_vig; |
| 86 | struct mdp5_pipe_block pipe_rgb; | 86 | struct mdp5_pipe_block pipe_rgb; |
| 87 | struct mdp5_pipe_block pipe_dma; | 87 | struct mdp5_pipe_block pipe_dma; |
| 88 | struct mdp5_pipe_block pipe_cursor; | ||
| 88 | struct mdp5_lm_block lm; | 89 | struct mdp5_lm_block lm; |
| 89 | struct mdp5_sub_block dspp; | 90 | struct mdp5_sub_block dspp; |
| 90 | struct mdp5_sub_block ad; | 91 | struct mdp5_sub_block ad; |
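
A plausible reason the count field above switches to unsigned int: the kernel's min() macro type-checks its arguments, and the modeset_init() hunk at the end of this series computes min(priv->num_encoders, mdp5_cfg->lm.count), where num_encoders is already unsigned. A userspace sketch of such a type-checked min, using GCC statement expressions:

#include <stdio.h>

#define min(x, y) ({					\
	__typeof__(x) _x = (x);				\
	__typeof__(y) _y = (y);				\
	(void)(&_x == &_y);	/* warns if the types differ */	\
	_x < _y ? _x : _y; })

int main(void)
{
	unsigned int num_encoders = 3, lm_count = 6;

	printf("num_crtcs = %u\n", min(num_encoders, lm_count));
	return 0;
}
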
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c index c627ab6d0061..df1c8adec3f3 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c | |||
| @@ -16,16 +16,6 @@ | |||
| 16 | #include "drm_crtc.h" | 16 | #include "drm_crtc.h" |
| 17 | #include "drm_crtc_helper.h" | 17 | #include "drm_crtc_helper.h" |
| 18 | 18 | ||
| 19 | struct mdp5_cmd_encoder { | ||
| 20 | struct drm_encoder base; | ||
| 21 | struct mdp5_interface intf; | ||
| 22 | bool enabled; | ||
| 23 | uint32_t bsc; | ||
| 24 | |||
| 25 | struct mdp5_ctl *ctl; | ||
| 26 | }; | ||
| 27 | #define to_mdp5_cmd_encoder(x) container_of(x, struct mdp5_cmd_encoder, base) | ||
| 28 | |||
| 29 | static struct mdp5_kms *get_kms(struct drm_encoder *encoder) | 19 | static struct mdp5_kms *get_kms(struct drm_encoder *encoder) |
| 30 | { | 20 | { |
| 31 | struct msm_drm_private *priv = encoder->dev->dev_private; | 21 | struct msm_drm_private *priv = encoder->dev->dev_private; |
| @@ -36,47 +26,8 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder) | |||
| 36 | #include <mach/board.h> | 26 | #include <mach/board.h> |
| 37 | #include <linux/msm-bus.h> | 27 | #include <linux/msm-bus.h> |
| 38 | #include <linux/msm-bus-board.h> | 28 | #include <linux/msm-bus-board.h> |
| 39 | #define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \ | ||
| 40 | { \ | ||
| 41 | .src = MSM_BUS_MASTER_MDP_PORT0, \ | ||
| 42 | .dst = MSM_BUS_SLAVE_EBI_CH0, \ | ||
| 43 | .ab = (ab_val), \ | ||
| 44 | .ib = (ib_val), \ | ||
| 45 | } | ||
| 46 | |||
| 47 | static struct msm_bus_vectors mdp_bus_vectors[] = { | ||
| 48 | MDP_BUS_VECTOR_ENTRY(0, 0), | ||
| 49 | MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000), | ||
| 50 | }; | ||
| 51 | static struct msm_bus_paths mdp_bus_usecases[] = { { | ||
| 52 | .num_paths = 1, | ||
| 53 | .vectors = &mdp_bus_vectors[0], | ||
| 54 | }, { | ||
| 55 | .num_paths = 1, | ||
| 56 | .vectors = &mdp_bus_vectors[1], | ||
| 57 | } }; | ||
| 58 | static struct msm_bus_scale_pdata mdp_bus_scale_table = { | ||
| 59 | .usecase = mdp_bus_usecases, | ||
| 60 | .num_usecases = ARRAY_SIZE(mdp_bus_usecases), | ||
| 61 | .name = "mdss_mdp", | ||
| 62 | }; | ||
| 63 | |||
| 64 | static void bs_init(struct mdp5_cmd_encoder *mdp5_cmd_enc) | ||
| 65 | { | ||
| 66 | mdp5_cmd_enc->bsc = msm_bus_scale_register_client( | ||
| 67 | &mdp_bus_scale_table); | ||
| 68 | DBG("bus scale client: %08x", mdp5_cmd_enc->bsc); | ||
| 69 | } | ||
| 70 | |||
| 71 | static void bs_fini(struct mdp5_cmd_encoder *mdp5_cmd_enc) | ||
| 72 | { | ||
| 73 | if (mdp5_cmd_enc->bsc) { | ||
| 74 | msm_bus_scale_unregister_client(mdp5_cmd_enc->bsc); | ||
| 75 | mdp5_cmd_enc->bsc = 0; | ||
| 76 | } | ||
| 77 | } | ||
| 78 | 29 | ||
| 79 | static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx) | 30 | static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx) |
| 80 | { | 31 | { |
| 81 | if (mdp5_cmd_enc->bsc) { | 32 | if (mdp5_cmd_enc->bsc) { |
| 82 | DBG("set bus scaling: %d", idx); | 33 | DBG("set bus scaling: %d", idx); |
| @@ -89,14 +40,12 @@ static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx) | |||
| 89 | } | 40 | } |
| 90 | } | 41 | } |
| 91 | #else | 42 | #else |
| 92 | static void bs_init(struct mdp5_cmd_encoder *mdp5_cmd_enc) {} | 43 | static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx) {} |
| 93 | static void bs_fini(struct mdp5_cmd_encoder *mdp5_cmd_enc) {} | ||
| 94 | static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx) {} | ||
| 95 | #endif | 44 | #endif |
| 96 | 45 | ||
| 97 | #define VSYNC_CLK_RATE 19200000 | 46 | #define VSYNC_CLK_RATE 19200000 |
| 98 | static int pingpong_tearcheck_setup(struct drm_encoder *encoder, | 47 | static int pingpong_tearcheck_setup(struct drm_encoder *encoder, |
| 99 | struct drm_display_mode *mode) | 48 | struct drm_display_mode *mode) |
| 100 | { | 49 | { |
| 101 | struct mdp5_kms *mdp5_kms = get_kms(encoder); | 50 | struct mdp5_kms *mdp5_kms = get_kms(encoder); |
| 102 | struct device *dev = encoder->dev->dev; | 51 | struct device *dev = encoder->dev->dev; |
| @@ -176,23 +125,11 @@ static void pingpong_tearcheck_disable(struct drm_encoder *encoder) | |||
| 176 | clk_disable_unprepare(mdp5_kms->vsync_clk); | 125 | clk_disable_unprepare(mdp5_kms->vsync_clk); |
| 177 | } | 126 | } |
| 178 | 127 | ||
| 179 | static void mdp5_cmd_encoder_destroy(struct drm_encoder *encoder) | 128 | void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, |
| 180 | { | 129 | struct drm_display_mode *mode, |
| 181 | struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); | 130 | struct drm_display_mode *adjusted_mode) |
| 182 | bs_fini(mdp5_cmd_enc); | ||
| 183 | drm_encoder_cleanup(encoder); | ||
| 184 | kfree(mdp5_cmd_enc); | ||
| 185 | } | ||
| 186 | |||
| 187 | static const struct drm_encoder_funcs mdp5_cmd_encoder_funcs = { | ||
| 188 | .destroy = mdp5_cmd_encoder_destroy, | ||
| 189 | }; | ||
| 190 | |||
| 191 | static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, | ||
| 192 | struct drm_display_mode *mode, | ||
| 193 | struct drm_display_mode *adjusted_mode) | ||
| 194 | { | 131 | { |
| 195 | struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); | 132 | struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); |
| 196 | 133 | ||
| 197 | mode = adjusted_mode; | 134 | mode = adjusted_mode; |
| 198 | 135 | ||
| @@ -209,9 +146,9 @@ static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, | |||
| 209 | mdp5_cmd_enc->ctl); | 146 | mdp5_cmd_enc->ctl); |
| 210 | } | 147 | } |
| 211 | 148 | ||
| 212 | static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) | 149 | void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) |
| 213 | { | 150 | { |
| 214 | struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); | 151 | struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); |
| 215 | struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl; | 152 | struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl; |
| 216 | struct mdp5_interface *intf = &mdp5_cmd_enc->intf; | 153 | struct mdp5_interface *intf = &mdp5_cmd_enc->intf; |
| 217 | 154 | ||
| @@ -228,9 +165,9 @@ static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) | |||
| 228 | mdp5_cmd_enc->enabled = false; | 165 | mdp5_cmd_enc->enabled = false; |
| 229 | } | 166 | } |
| 230 | 167 | ||
| 231 | static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) | 168 | void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) |
| 232 | { | 169 | { |
| 233 | struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); | 170 | struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); |
| 234 | struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl; | 171 | struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl; |
| 235 | struct mdp5_interface *intf = &mdp5_cmd_enc->intf; | 172 | struct mdp5_interface *intf = &mdp5_cmd_enc->intf; |
| 236 | 173 | ||
| @@ -248,16 +185,10 @@ static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) | |||
| 248 | mdp5_cmd_enc->enabled = true; | 185 | mdp5_cmd_enc->enabled = true; |
| 249 | } | 186 | } |
| 250 | 187 | ||
| 251 | static const struct drm_encoder_helper_funcs mdp5_cmd_encoder_helper_funcs = { | ||
| 252 | .mode_set = mdp5_cmd_encoder_mode_set, | ||
| 253 | .disable = mdp5_cmd_encoder_disable, | ||
| 254 | .enable = mdp5_cmd_encoder_enable, | ||
| 255 | }; | ||
| 256 | |||
| 257 | int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, | 188 | int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, |
| 258 | struct drm_encoder *slave_encoder) | 189 | struct drm_encoder *slave_encoder) |
| 259 | { | 190 | { |
| 260 | struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); | 191 | struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); |
| 261 | struct mdp5_kms *mdp5_kms; | 192 | struct mdp5_kms *mdp5_kms; |
| 262 | int intf_num; | 193 | int intf_num; |
| 263 | u32 data = 0; | 194 | u32 data = 0; |
| @@ -292,43 +223,3 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, | |||
| 292 | 223 | ||
| 293 | return 0; | 224 | return 0; |
| 294 | } | 225 | } |
| 295 | |||
| 296 | /* initialize command mode encoder */ | ||
| 297 | struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev, | ||
| 298 | struct mdp5_interface *intf, struct mdp5_ctl *ctl) | ||
| 299 | { | ||
| 300 | struct drm_encoder *encoder = NULL; | ||
| 301 | struct mdp5_cmd_encoder *mdp5_cmd_enc; | ||
| 302 | int ret; | ||
| 303 | |||
| 304 | if (WARN_ON((intf->type != INTF_DSI) && | ||
| 305 | (intf->mode != MDP5_INTF_DSI_MODE_COMMAND))) { | ||
| 306 | ret = -EINVAL; | ||
| 307 | goto fail; | ||
| 308 | } | ||
| 309 | |||
| 310 | mdp5_cmd_enc = kzalloc(sizeof(*mdp5_cmd_enc), GFP_KERNEL); | ||
| 311 | if (!mdp5_cmd_enc) { | ||
| 312 | ret = -ENOMEM; | ||
| 313 | goto fail; | ||
| 314 | } | ||
| 315 | |||
| 316 | memcpy(&mdp5_cmd_enc->intf, intf, sizeof(mdp5_cmd_enc->intf)); | ||
| 317 | encoder = &mdp5_cmd_enc->base; | ||
| 318 | mdp5_cmd_enc->ctl = ctl; | ||
| 319 | |||
| 320 | drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs, | ||
| 321 | DRM_MODE_ENCODER_DSI, NULL); | ||
| 322 | |||
| 323 | drm_encoder_helper_add(encoder, &mdp5_cmd_encoder_helper_funcs); | ||
| 324 | |||
| 325 | bs_init(mdp5_cmd_enc); | ||
| 326 | |||
| 327 | return encoder; | ||
| 328 | |||
| 329 | fail: | ||
| 330 | if (encoder) | ||
| 331 | mdp5_cmd_encoder_destroy(encoder); | ||
| 332 | |||
| 333 | return ERR_PTR(ret); | ||
| 334 | } | ||
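
After the cleanup above, only bs_set() survives, stubbed out when bus-scaling support isn't compiled in. That is a common kernel pattern: identical call sites everywhere, with a no-op variant selected at build time. A small self-contained sketch of the pattern:

#include <stdio.h>

/* #define CONFIG_MSM_BUS_SCALING */

#ifdef CONFIG_MSM_BUS_SCALING
static void bs_set(int client, int idx)
{
	printf("bus scale client %d -> usecase %d\n", client, idx);
}
#else
/* no-op stub: callers need no #ifdef of their own */
static void bs_set(int client, int idx) { (void)client; (void)idx; }
#endif

int main(void)
{
	bs_set(1, 1);	/* compiles to nothing unless the config is set */
	return 0;
}
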
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 1ce8a01a5a28..d0c8b38b96ce 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | |||
| @@ -177,6 +177,21 @@ static void mdp5_crtc_destroy(struct drm_crtc *crtc) | |||
| 177 | kfree(mdp5_crtc); | 177 | kfree(mdp5_crtc); |
| 178 | } | 178 | } |
| 179 | 179 | ||
| 180 | static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage) | ||
| 181 | { | ||
| 182 | switch (stage) { | ||
| 183 | case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA; | ||
| 184 | case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA; | ||
| 185 | case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA; | ||
| 186 | case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA; | ||
| 187 | case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA; | ||
| 188 | case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA; | ||
| 189 | case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA; | ||
| 190 | default: | ||
| 191 | return 0; | ||
| 192 | } | ||
| 193 | } | ||
| 194 | |||
| 180 | /* | 195 | /* |
| 181 | * blend_setup() - blend all the planes of a CRTC | 196 | * blend_setup() - blend all the planes of a CRTC |
| 182 | * | 197 | * |
| @@ -195,8 +210,10 @@ static void blend_setup(struct drm_crtc *crtc) | |||
| 195 | uint32_t lm = mdp5_crtc->lm; | 210 | uint32_t lm = mdp5_crtc->lm; |
| 196 | uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0; | 211 | uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0; |
| 197 | unsigned long flags; | 212 | unsigned long flags; |
| 198 | uint8_t stage[STAGE_MAX + 1]; | 213 | enum mdp5_pipe stage[STAGE_MAX + 1] = { SSPP_NONE }; |
| 199 | int i, plane_cnt = 0; | 214 | int i, plane_cnt = 0; |
| 215 | bool bg_alpha_enabled = false; | ||
| 216 | u32 mixer_op_mode = 0; | ||
| 200 | #define blender(stage) ((stage) - STAGE0) | 217 | #define blender(stage) ((stage) - STAGE0) |
| 201 | 218 | ||
| 202 | hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); | 219 | hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); |
| @@ -218,6 +235,11 @@ static void blend_setup(struct drm_crtc *crtc) | |||
| 218 | if (!pstates[STAGE_BASE]) { | 235 | if (!pstates[STAGE_BASE]) { |
| 219 | ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT; | 236 | ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT; |
| 220 | DBG("Border Color is enabled"); | 237 | DBG("Border Color is enabled"); |
| 238 | } else if (plane_cnt) { | ||
| 239 | format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb)); | ||
| 240 | |||
| 241 | if (format->alpha_enable) | ||
| 242 | bg_alpha_enabled = true; | ||
| 221 | } | 243 | } |
| 222 | 244 | ||
| 223 | /* The reset for blending */ | 245 | /* The reset for blending */ |
| @@ -232,6 +254,12 @@ static void blend_setup(struct drm_crtc *crtc) | |||
| 232 | MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST); | 254 | MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST); |
| 233 | fg_alpha = pstates[i]->alpha; | 255 | fg_alpha = pstates[i]->alpha; |
| 234 | bg_alpha = 0xFF - pstates[i]->alpha; | 256 | bg_alpha = 0xFF - pstates[i]->alpha; |
| 257 | |||
| 258 | if (!format->alpha_enable && bg_alpha_enabled) | ||
| 259 | mixer_op_mode = 0; | ||
| 260 | else | ||
| 261 | mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i); | ||
| 262 | |||
| 235 | DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha); | 263 | DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha); |
| 236 | 264 | ||
| 237 | if (format->alpha_enable && pstates[i]->premultiplied) { | 265 | if (format->alpha_enable && pstates[i]->premultiplied) { |
| @@ -268,6 +296,8 @@ static void blend_setup(struct drm_crtc *crtc) | |||
| 268 | blender(i)), bg_alpha); | 296 | blender(i)), bg_alpha); |
| 269 | } | 297 | } |
| 270 | 298 | ||
| 299 | mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), mixer_op_mode); | ||
| 300 | |||
| 271 | mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags); | 301 | mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags); |
| 272 | 302 | ||
| 273 | out: | 303 | out: |
| @@ -370,6 +400,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, | |||
| 370 | struct plane_state pstates[STAGE_MAX + 1]; | 400 | struct plane_state pstates[STAGE_MAX + 1]; |
| 371 | const struct mdp5_cfg_hw *hw_cfg; | 401 | const struct mdp5_cfg_hw *hw_cfg; |
| 372 | const struct drm_plane_state *pstate; | 402 | const struct drm_plane_state *pstate; |
| 403 | bool cursor_plane = false; | ||
| 373 | int cnt = 0, base = 0, i; | 404 | int cnt = 0, base = 0, i; |
| 374 | 405 | ||
| 375 | DBG("%s: check", crtc->name); | 406 | DBG("%s: check", crtc->name); |
| @@ -379,6 +410,9 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, | |||
| 379 | pstates[cnt].state = to_mdp5_plane_state(pstate); | 410 | pstates[cnt].state = to_mdp5_plane_state(pstate); |
| 380 | 411 | ||
| 381 | cnt++; | 412 | cnt++; |
| 413 | |||
| 414 | if (plane->type == DRM_PLANE_TYPE_CURSOR) | ||
| 415 | cursor_plane = true; | ||
| 382 | } | 416 | } |
| 383 | 417 | ||
| 384 | /* assign a stage based on sorted zpos property */ | 418 | /* assign a stage based on sorted zpos property */ |
| @@ -390,6 +424,10 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, | |||
| 390 | if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base)) | 424 | if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base)) |
| 391 | base++; | 425 | base++; |
| 392 | 426 | ||
| 427 | /* trigger a warning if cursor isn't the highest zorder */ | ||
| 428 | WARN_ON(cursor_plane && | ||
| 429 | (pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR)); | ||
| 430 | |||
| 393 | /* verify that there are not too many planes attached to crtc | 431 | /* verify that there are not too many planes attached to crtc |
| 394 | * and that we don't have conflicting mixer stages: | 432 | * and that we don't have conflicting mixer stages: |
| 395 | */ | 433 | */ |
| @@ -401,7 +439,10 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, | |||
| 401 | } | 439 | } |
| 402 | 440 | ||
| 403 | for (i = 0; i < cnt; i++) { | 441 | for (i = 0; i < cnt; i++) { |
| 404 | pstates[i].state->stage = STAGE_BASE + i + base; | 442 | if (cursor_plane && (i == (cnt - 1))) |
| 443 | pstates[i].state->stage = hw_cfg->lm.nb_stages; | ||
| 444 | else | ||
| 445 | pstates[i].state->stage = STAGE_BASE + i + base; | ||
| 405 | DBG("%s: assign pipe %s on stage=%d", crtc->name, | 446 | DBG("%s: assign pipe %s on stage=%d", crtc->name, |
| 406 | pstates[i].plane->name, | 447 | pstates[i].plane->name, |
| 407 | pstates[i].state->stage); | 448 | pstates[i].state->stage); |
| @@ -612,6 +653,16 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = { | |||
| 612 | .cursor_move = mdp5_crtc_cursor_move, | 653 | .cursor_move = mdp5_crtc_cursor_move, |
| 613 | }; | 654 | }; |
| 614 | 655 | ||
| 656 | static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = { | ||
| 657 | .set_config = drm_atomic_helper_set_config, | ||
| 658 | .destroy = mdp5_crtc_destroy, | ||
| 659 | .page_flip = drm_atomic_helper_page_flip, | ||
| 660 | .set_property = drm_atomic_helper_crtc_set_property, | ||
| 661 | .reset = drm_atomic_helper_crtc_reset, | ||
| 662 | .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, | ||
| 663 | .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, | ||
| 664 | }; | ||
| 665 | |||
| 615 | static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { | 666 | static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { |
| 616 | .mode_set_nofb = mdp5_crtc_mode_set_nofb, | 667 | .mode_set_nofb = mdp5_crtc_mode_set_nofb, |
| 617 | .disable = mdp5_crtc_disable, | 668 | .disable = mdp5_crtc_disable, |
| @@ -727,6 +778,13 @@ void mdp5_crtc_set_pipeline(struct drm_crtc *crtc, | |||
| 727 | mdp5_ctl_set_pipeline(ctl, intf, lm); | 778 | mdp5_ctl_set_pipeline(ctl, intf, lm); |
| 728 | } | 779 | } |
| 729 | 780 | ||
| 781 | struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc) | ||
| 782 | { | ||
| 783 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | ||
| 784 | |||
| 785 | return mdp5_crtc->ctl; | ||
| 786 | } | ||
| 787 | |||
| 730 | int mdp5_crtc_get_lm(struct drm_crtc *crtc) | 788 | int mdp5_crtc_get_lm(struct drm_crtc *crtc) |
| 731 | { | 789 | { |
| 732 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | 790 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); |
| @@ -745,7 +803,8 @@ void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc) | |||
| 745 | 803 | ||
| 746 | /* initialize crtc */ | 804 | /* initialize crtc */ |
| 747 | struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, | 805 | struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, |
| 748 | struct drm_plane *plane, int id) | 806 | struct drm_plane *plane, |
| 807 | struct drm_plane *cursor_plane, int id) | ||
| 749 | { | 808 | { |
| 750 | struct drm_crtc *crtc = NULL; | 809 | struct drm_crtc *crtc = NULL; |
| 751 | struct mdp5_crtc *mdp5_crtc; | 810 | struct mdp5_crtc *mdp5_crtc; |
| @@ -766,8 +825,12 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, | |||
| 766 | mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq; | 825 | mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq; |
| 767 | mdp5_crtc->err.irq = mdp5_crtc_err_irq; | 826 | mdp5_crtc->err.irq = mdp5_crtc_err_irq; |
| 768 | 827 | ||
| 769 | drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs, | 828 | if (cursor_plane) |
| 770 | NULL); | 829 | drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane, |
| 830 | &mdp5_crtc_no_lm_cursor_funcs, NULL); | ||
| 831 | else | ||
| 832 | drm_crtc_init_with_planes(dev, crtc, plane, NULL, | ||
| 833 | &mdp5_crtc_funcs, NULL); | ||
| 771 | 834 | ||
| 772 | drm_flip_work_init(&mdp5_crtc->unref_cursor_work, | 835 | drm_flip_work_init(&mdp5_crtc->unref_cursor_work, |
| 773 | "unref cursor", unref_cursor_worker); | 836 | "unref cursor", unref_cursor_worker); |
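
A sketch of the new mixer_op_mode accumulation in blend_setup() above: each stage whose format has usable foreground alpha contributes its STAGEn_FG_ALPHA bit, while an opaque plane stacked over an alpha-enabled base clears the accumulated mode. The bit positions are inferred from the mdp5.xml.h defines earlier (STAGEn_FG_ALPHA appears to be BIT(n + 1)):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fg_alpha_mask(int stage)
{
	/* STAGE0..STAGE6 only; anything else gets no FG alpha bit */
	return (stage >= 0 && stage <= 6) ? 1u << (stage + 1) : 0;
}

int main(void)
{
	bool bg_alpha_enabled = true;	/* base plane format has alpha */
	bool stage_has_alpha[3] = { true, false, true };
	uint32_t mixer_op_mode = 0;

	for (int i = 0; i < 3; i++) {
		if (!stage_has_alpha[i] && bg_alpha_enabled)
			mixer_op_mode = 0;	/* opaque overlay: reset */
		else
			mixer_op_mode |= fg_alpha_mask(i);
	}
	printf("LM_BLEND_COLOR_OUT = 0x%02x\n", (unsigned)mixer_op_mode);
	return 0;
}
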
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c index d021edc3b307..8b93f7e13200 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c | |||
| @@ -326,6 +326,8 @@ static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe, | |||
| 326 | case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage); | 326 | case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage); |
| 327 | case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage); | 327 | case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage); |
| 328 | case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage); | 328 | case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage); |
| 329 | case SSPP_CURSOR0: | ||
| 330 | case SSPP_CURSOR1: | ||
| 329 | default: return 0; | 331 | default: return 0; |
| 330 | } | 332 | } |
| 331 | } | 333 | } |
| @@ -333,7 +335,7 @@ static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe, | |||
| 333 | static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe, | 335 | static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe, |
| 334 | enum mdp_mixer_stage_id stage) | 336 | enum mdp_mixer_stage_id stage) |
| 335 | { | 337 | { |
| 336 | if (stage < STAGE6) | 338 | if (stage < STAGE6 && (pipe != SSPP_CURSOR0 && pipe != SSPP_CURSOR1)) |
| 337 | return 0; | 339 | return 0; |
| 338 | 340 | ||
| 339 | switch (pipe) { | 341 | switch (pipe) { |
| @@ -347,12 +349,14 @@ static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe, | |||
| 347 | case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3; | 349 | case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3; |
| 348 | case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3; | 350 | case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3; |
| 349 | case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3; | 351 | case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3; |
| 352 | case SSPP_CURSOR0: return MDP5_CTL_LAYER_EXT_REG_CURSOR0(stage); | ||
| 353 | case SSPP_CURSOR1: return MDP5_CTL_LAYER_EXT_REG_CURSOR1(stage); | ||
| 350 | default: return 0; | 354 | default: return 0; |
| 351 | } | 355 | } |
| 352 | } | 356 | } |
| 353 | 357 | ||
| 354 | int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt, | 358 | int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt, |
| 355 | u32 ctl_blend_op_flags) | 359 | u32 ctl_blend_op_flags) |
| 356 | { | 360 | { |
| 357 | unsigned long flags; | 361 | unsigned long flags; |
| 358 | u32 blend_cfg = 0, blend_ext_cfg = 0; | 362 | u32 blend_cfg = 0, blend_ext_cfg = 0; |
| @@ -365,7 +369,7 @@ int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt, | |||
| 365 | start_stage = STAGE_BASE; | 369 | start_stage = STAGE_BASE; |
| 366 | } | 370 | } |
| 367 | 371 | ||
| 368 | for (i = start_stage; i < start_stage + stage_cnt; i++) { | 372 | for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) { |
| 369 | blend_cfg |= mdp_ctl_blend_mask(stage[i], i); | 373 | blend_cfg |= mdp_ctl_blend_mask(stage[i], i); |
| 370 | blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i); | 374 | blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i); |
| 371 | } | 375 | } |
| @@ -422,6 +426,8 @@ u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe) | |||
| 422 | case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1; | 426 | case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1; |
| 423 | case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3; | 427 | case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3; |
| 424 | case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3; | 428 | case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3; |
| 429 | case SSPP_CURSOR0: return MDP5_CTL_FLUSH_CURSOR_0; | ||
| 430 | case SSPP_CURSOR1: return MDP5_CTL_FLUSH_CURSOR_1; | ||
| 425 | default: return 0; | 431 | default: return 0; |
| 426 | } | 432 | } |
| 427 | } | 433 | } |
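
A sketch of why the reworked blend loop above can safely run all the way to STAGE_MAX: unused slots in the stage[] array hold SSPP_NONE, and both mask helpers return 0 for it, so empty stages contribute nothing. Standalone model with illustrative mask values (the real encodings live in mdp5.xml.h):

#include <stdint.h>
#include <stdio.h>

enum pipe { SSPP_NONE, SSPP_VIG0, SSPP_CURSOR0 };
#define STAGE_MAX 7

static uint32_t blend_mask(enum pipe p, int stage)
{
	/* illustrative 3-bit-per-stage encoding; SSPP_NONE yields 0 */
	return p == SSPP_VIG0 ? 0x7u << (stage * 3) : 0;
}

int main(void)
{
	enum pipe stage[STAGE_MAX + 1] = { SSPP_NONE };	/* all empty */
	uint32_t blend_cfg = 0;

	stage[0] = SSPP_VIG0;	/* one assigned pipe, rest stay SSPP_NONE */
	for (int i = 0; i <= STAGE_MAX; i++)
		blend_cfg |= blend_mask(stage[i], i);

	printf("blend_cfg = 0x%08x\n", (unsigned)blend_cfg);
	return 0;
}
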
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h index 96148c6f863c..fda00d33e4db 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h | |||
| @@ -56,8 +56,8 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable); | |||
| 56 | * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask) | 56 | * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask) |
| 57 | */ | 57 | */ |
| 58 | #define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT BIT(0) | 58 | #define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT BIT(0) |
| 59 | int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt, | 59 | int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt, |
| 60 | u32 ctl_blend_op_flags); | 60 | u32 ctl_blend_op_flags); |
| 61 | 61 | ||
| 62 | /** | 62 | /** |
| 63 | * mdp_ctl_flush_mask...() - Register FLUSH masks | 63 | * mdp_ctl_flush_mask...() - Register FLUSH masks |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c index fe0c22230883..80fa482ae8ed 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | |||
| @@ -21,17 +21,6 @@ | |||
| 21 | #include "drm_crtc.h" | 21 | #include "drm_crtc.h" |
| 22 | #include "drm_crtc_helper.h" | 22 | #include "drm_crtc_helper.h" |
| 23 | 23 | ||
| 24 | struct mdp5_encoder { | ||
| 25 | struct drm_encoder base; | ||
| 26 | struct mdp5_interface intf; | ||
| 27 | spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */ | ||
| 28 | bool enabled; | ||
| 29 | uint32_t bsc; | ||
| 30 | |||
| 31 | struct mdp5_ctl *ctl; | ||
| 32 | }; | ||
| 33 | #define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base) | ||
| 34 | |||
| 35 | static struct mdp5_kms *get_kms(struct drm_encoder *encoder) | 24 | static struct mdp5_kms *get_kms(struct drm_encoder *encoder) |
| 36 | { | 25 | { |
| 37 | struct msm_drm_private *priv = encoder->dev->dev_private; | 26 | struct msm_drm_private *priv = encoder->dev->dev_private; |
| @@ -112,9 +101,9 @@ static const struct drm_encoder_funcs mdp5_encoder_funcs = { | |||
| 112 | .destroy = mdp5_encoder_destroy, | 101 | .destroy = mdp5_encoder_destroy, |
| 113 | }; | 102 | }; |
| 114 | 103 | ||
| 115 | static void mdp5_encoder_mode_set(struct drm_encoder *encoder, | 104 | static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder, |
| 116 | struct drm_display_mode *mode, | 105 | struct drm_display_mode *mode, |
| 117 | struct drm_display_mode *adjusted_mode) | 106 | struct drm_display_mode *adjusted_mode) |
| 118 | { | 107 | { |
| 119 | struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); | 108 | struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); |
| 120 | struct mdp5_kms *mdp5_kms = get_kms(encoder); | 109 | struct mdp5_kms *mdp5_kms = get_kms(encoder); |
| @@ -221,7 +210,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder, | |||
| 221 | mdp5_encoder->ctl); | 210 | mdp5_encoder->ctl); |
| 222 | } | 211 | } |
| 223 | 212 | ||
| 224 | static void mdp5_encoder_disable(struct drm_encoder *encoder) | 213 | static void mdp5_vid_encoder_disable(struct drm_encoder *encoder) |
| 225 | { | 214 | { |
| 226 | struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); | 215 | struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); |
| 227 | struct mdp5_kms *mdp5_kms = get_kms(encoder); | 216 | struct mdp5_kms *mdp5_kms = get_kms(encoder); |
| @@ -256,7 +245,7 @@ static void mdp5_encoder_disable(struct drm_encoder *encoder) | |||
| 256 | mdp5_encoder->enabled = false; | 245 | mdp5_encoder->enabled = false; |
| 257 | } | 246 | } |
| 258 | 247 | ||
| 259 | static void mdp5_encoder_enable(struct drm_encoder *encoder) | 248 | static void mdp5_vid_encoder_enable(struct drm_encoder *encoder) |
| 260 | { | 249 | { |
| 261 | struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); | 250 | struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); |
| 262 | struct mdp5_kms *mdp5_kms = get_kms(encoder); | 251 | struct mdp5_kms *mdp5_kms = get_kms(encoder); |
| @@ -279,6 +268,41 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder) | |||
| 279 | mdp5_encoder->enabled = true; | 268 | mdp5_encoder->enabled = true; |
| 280 | } | 269 | } |
| 281 | 270 | ||
| 271 | static void mdp5_encoder_mode_set(struct drm_encoder *encoder, | ||
| 272 | struct drm_display_mode *mode, | ||
| 273 | struct drm_display_mode *adjusted_mode) | ||
| 274 | { | ||
| 275 | struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); | ||
| 276 | struct mdp5_interface *intf = &mdp5_encoder->intf; | ||
| 277 | |||
| 278 | if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) | ||
| 279 | mdp5_cmd_encoder_mode_set(encoder, mode, adjusted_mode); | ||
| 280 | else | ||
| 281 | mdp5_vid_encoder_mode_set(encoder, mode, adjusted_mode); | ||
| 282 | } | ||
| 283 | |||
| 284 | static void mdp5_encoder_disable(struct drm_encoder *encoder) | ||
| 285 | { | ||
| 286 | struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); | ||
| 287 | struct mdp5_interface *intf = &mdp5_encoder->intf; | ||
| 288 | |||
| 289 | if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) | ||
| 290 | mdp5_cmd_encoder_disable(encoder); | ||
| 291 | else | ||
| 292 | mdp5_vid_encoder_disable(encoder); | ||
| 293 | } | ||
| 294 | |||
| 295 | static void mdp5_encoder_enable(struct drm_encoder *encoder) | ||
| 296 | { | ||
| 297 | struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); | ||
| 298 | struct mdp5_interface *intf = &mdp5_encoder->intf; | ||
| 299 | |||
| 300 | if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) | ||
| 301 | mdp5_cmd_encoder_enable(encoder); | ||
| 302 | else | ||
| 303 | mdp5_vid_encoder_enable(encoder); | ||
| 304 | } | ||
| 305 | |||
| 282 | static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { | 306 | static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { |
| 283 | .mode_set = mdp5_encoder_mode_set, | 307 | .mode_set = mdp5_encoder_mode_set, |
| 284 | .disable = mdp5_encoder_disable, | 308 | .disable = mdp5_encoder_disable, |
| @@ -303,8 +327,8 @@ u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder) | |||
| 303 | return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf)); | 327 | return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf)); |
| 304 | } | 328 | } |
| 305 | 329 | ||
| 306 | int mdp5_encoder_set_split_display(struct drm_encoder *encoder, | 330 | int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder, |
| 307 | struct drm_encoder *slave_encoder) | 331 | struct drm_encoder *slave_encoder) |
| 308 | { | 332 | { |
| 309 | struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); | 333 | struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); |
| 310 | struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder); | 334 | struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder); |
| @@ -342,6 +366,23 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder, | |||
| 342 | return 0; | 366 | return 0; |
| 343 | } | 367 | } |
| 344 | 368 | ||
| 369 | void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode) | ||
| 370 | { | ||
| 371 | struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); | ||
| 372 | struct mdp5_interface *intf = &mdp5_encoder->intf; | ||
| 373 | |||
| 374 | /* TODO: Expand this to set writeback modes too */ | ||
| 375 | if (cmd_mode) { | ||
| 376 | WARN_ON(intf->type != INTF_DSI); | ||
| 377 | intf->mode = MDP5_INTF_DSI_MODE_COMMAND; | ||
| 378 | } else { | ||
| 379 | if (intf->type == INTF_DSI) | ||
| 380 | intf->mode = MDP5_INTF_DSI_MODE_VIDEO; | ||
| 381 | else | ||
| 382 | intf->mode = MDP5_INTF_MODE_NONE; | ||
| 383 | } | ||
| 384 | } | ||
| 385 | |||
| 345 | /* initialize encoder */ | 386 | /* initialize encoder */ |
| 346 | struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, | 387 | struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, |
| 347 | struct mdp5_interface *intf, struct mdp5_ctl *ctl) | 388 | struct mdp5_interface *intf, struct mdp5_ctl *ctl) |
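
With the hunks above, the encoder becomes a thin dispatcher: a single drm_encoder whose helpers branch on the interface mode, set after creation via mdp5_encoder_set_intf_mode(). A minimal userspace model of that state machine (names simplified from the driver's):

#include <stdbool.h>
#include <stdio.h>

enum intf_mode { INTF_MODE_NONE, DSI_MODE_VIDEO, DSI_MODE_COMMAND };

struct encoder { enum intf_mode mode; };

static void cmd_enable(void) { puts("cmd-mode enable"); }
static void vid_enable(void) { puts("video-mode enable"); }

static void encoder_enable(struct encoder *e)
{
	/* one entry point, mode-specific path chosen at runtime */
	if (e->mode == DSI_MODE_COMMAND)
		cmd_enable();
	else
		vid_enable();
}

static void set_intf_mode(struct encoder *e, bool cmd_mode)
{
	e->mode = cmd_mode ? DSI_MODE_COMMAND : DSI_MODE_VIDEO;
}

int main(void)
{
	struct encoder e = { INTF_MODE_NONE };

	set_intf_mode(&e, true);	/* DSI host reports a command panel */
	encoder_enable(&e);
	return 0;
}
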
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index c396d459a9d0..3eb0749223d9 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | |||
| @@ -148,7 +148,15 @@ static int mdp5_set_split_display(struct msm_kms *kms, | |||
| 148 | return mdp5_cmd_encoder_set_split_display(encoder, | 148 | return mdp5_cmd_encoder_set_split_display(encoder, |
| 149 | slave_encoder); | 149 | slave_encoder); |
| 150 | else | 150 | else |
| 151 | return mdp5_encoder_set_split_display(encoder, slave_encoder); | 151 | return mdp5_vid_encoder_set_split_display(encoder, |
| 152 | slave_encoder); | ||
| 153 | } | ||
| 154 | |||
| 155 | static void mdp5_set_encoder_mode(struct msm_kms *kms, | ||
| 156 | struct drm_encoder *encoder, | ||
| 157 | bool cmd_mode) | ||
| 158 | { | ||
| 159 | mdp5_encoder_set_intf_mode(encoder, cmd_mode); | ||
| 152 | } | 160 | } |
| 153 | 161 | ||
| 154 | static void mdp5_kms_destroy(struct msm_kms *kms) | 162 | static void mdp5_kms_destroy(struct msm_kms *kms) |
| @@ -230,6 +238,7 @@ static const struct mdp_kms_funcs kms_funcs = { | |||
| 230 | .get_format = mdp_get_format, | 238 | .get_format = mdp_get_format, |
| 231 | .round_pixclk = mdp5_round_pixclk, | 239 | .round_pixclk = mdp5_round_pixclk, |
| 232 | .set_split_display = mdp5_set_split_display, | 240 | .set_split_display = mdp5_set_split_display, |
| 241 | .set_encoder_mode = mdp5_set_encoder_mode, | ||
| 233 | .destroy = mdp5_kms_destroy, | 242 | .destroy = mdp5_kms_destroy, |
| 234 | #ifdef CONFIG_DEBUG_FS | 243 | #ifdef CONFIG_DEBUG_FS |
| 235 | .debugfs_init = mdp5_kms_debugfs_init, | 244 | .debugfs_init = mdp5_kms_debugfs_init, |
| @@ -267,7 +276,7 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms) | |||
| 267 | 276 | ||
| 268 | static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms, | 277 | static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms, |
| 269 | enum mdp5_intf_type intf_type, int intf_num, | 278 | enum mdp5_intf_type intf_type, int intf_num, |
| 270 | enum mdp5_intf_mode intf_mode, struct mdp5_ctl *ctl) | 279 | struct mdp5_ctl *ctl) |
| 271 | { | 280 | { |
| 272 | struct drm_device *dev = mdp5_kms->dev; | 281 | struct drm_device *dev = mdp5_kms->dev; |
| 273 | struct msm_drm_private *priv = dev->dev_private; | 282 | struct msm_drm_private *priv = dev->dev_private; |
| @@ -275,21 +284,15 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms, | |||
| 275 | struct mdp5_interface intf = { | 284 | struct mdp5_interface intf = { |
| 276 | .num = intf_num, | 285 | .num = intf_num, |
| 277 | .type = intf_type, | 286 | .type = intf_type, |
| 278 | .mode = intf_mode, | 287 | .mode = MDP5_INTF_MODE_NONE, |
| 279 | }; | 288 | }; |
| 280 | 289 | ||
| 281 | if ((intf_type == INTF_DSI) && | 290 | encoder = mdp5_encoder_init(dev, &intf, ctl); |
| 282 | (intf_mode == MDP5_INTF_DSI_MODE_COMMAND)) | ||
| 283 | encoder = mdp5_cmd_encoder_init(dev, &intf, ctl); | ||
| 284 | else | ||
| 285 | encoder = mdp5_encoder_init(dev, &intf, ctl); | ||
| 286 | |||
| 287 | if (IS_ERR(encoder)) { | 291 | if (IS_ERR(encoder)) { |
| 288 | dev_err(dev->dev, "failed to construct encoder\n"); | 292 | dev_err(dev->dev, "failed to construct encoder\n"); |
| 289 | return encoder; | 293 | return encoder; |
| 290 | } | 294 | } |
| 291 | 295 | ||
| 292 | encoder->possible_crtcs = (1 << priv->num_crtcs) - 1; | ||
| 293 | priv->encoders[priv->num_encoders++] = encoder; | 296 | priv->encoders[priv->num_encoders++] = encoder; |
| 294 | 297 | ||
| 295 | return encoder; | 298 | return encoder; |
| @@ -338,8 +341,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) | |||
| 338 | break; | 341 | break; |
| 339 | } | 342 | } |
| 340 | 343 | ||
| 341 | encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num, | 344 | encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num, ctl); |
| 342 | MDP5_INTF_MODE_NONE, ctl); | ||
| 343 | if (IS_ERR(encoder)) { | 345 | if (IS_ERR(encoder)) { |
| 344 | ret = PTR_ERR(encoder); | 346 | ret = PTR_ERR(encoder); |
| 345 | break; | 347 | break; |
| @@ -357,8 +359,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) | |||
| 357 | break; | 359 | break; |
| 358 | } | 360 | } |
| 359 | 361 | ||
| 360 | encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num, | 362 | encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num, ctl); |
| 361 | MDP5_INTF_MODE_NONE, ctl); | ||
| 362 | if (IS_ERR(encoder)) { | 363 | if (IS_ERR(encoder)) { |
| 363 | ret = PTR_ERR(encoder); | 364 | ret = PTR_ERR(encoder); |
| 364 | break; | 365 | break; |
| @@ -369,9 +370,6 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) | |||
| 369 | case INTF_DSI: | 370 | case INTF_DSI: |
| 370 | { | 371 | { |
| 371 | int dsi_id = get_dsi_id_from_intf(hw_cfg, intf_num); | 372 | int dsi_id = get_dsi_id_from_intf(hw_cfg, intf_num); |
| 372 | struct drm_encoder *dsi_encs[MSM_DSI_ENCODER_NUM]; | ||
| 373 | enum mdp5_intf_mode mode; | ||
| 374 | int i; | ||
| 375 | 373 | ||
| 376 | if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) { | 374 | if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) { |
| 377 | dev_err(dev->dev, "failed to find dsi from intf %d\n", | 375 | dev_err(dev->dev, "failed to find dsi from intf %d\n", |
| @@ -389,19 +387,13 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) | |||
| 389 | break; | 387 | break; |
| 390 | } | 388 | } |
| 391 | 389 | ||
| 392 | for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) { | 390 | encoder = construct_encoder(mdp5_kms, INTF_DSI, intf_num, ctl); |
| 393 | mode = (i == MSM_DSI_CMD_ENCODER_ID) ? | 391 | if (IS_ERR(encoder)) { |
| 394 | MDP5_INTF_DSI_MODE_COMMAND : | 392 | ret = PTR_ERR(encoder); |
| 395 | MDP5_INTF_DSI_MODE_VIDEO; | 393 | break; |
| 396 | dsi_encs[i] = construct_encoder(mdp5_kms, INTF_DSI, | ||
| 397 | intf_num, mode, ctl); | ||
| 398 | if (IS_ERR(dsi_encs[i])) { | ||
| 399 | ret = PTR_ERR(dsi_encs[i]); | ||
| 400 | break; | ||
| 401 | } | ||
| 402 | } | 394 | } |
| 403 | 395 | ||
| 404 | ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, dsi_encs); | 396 | ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder); |
| 405 | break; | 397 | break; |
| 406 | } | 398 | } |
| 407 | default: | 399 | default: |
| @@ -418,20 +410,48 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) | |||
| 418 | struct drm_device *dev = mdp5_kms->dev; | 410 | struct drm_device *dev = mdp5_kms->dev; |
| 419 | struct msm_drm_private *priv = dev->dev_private; | 411 | struct msm_drm_private *priv = dev->dev_private; |
| 420 | const struct mdp5_cfg_hw *hw_cfg; | 412 | const struct mdp5_cfg_hw *hw_cfg; |
| 421 | int i, ret; | 413 | unsigned int num_crtcs; |
| 414 | int i, ret, pi = 0, ci = 0; | ||
| 415 | struct drm_plane *primary[MAX_BASES] = { NULL }; | ||
| 416 | struct drm_plane *cursor[MAX_BASES] = { NULL }; | ||
| 422 | 417 | ||
| 423 | hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); | 418 | hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); |
| 424 | 419 | ||
| 425 | /* Construct planes equaling the number of hw pipes, and CRTCs | 420 | /* |
| 426 | * for the N layer-mixers (LM). The first N planes become primary | 421 | * Construct encoders and modeset initialize connector devices |
| 422 | * for each external display interface. | ||
| 423 | */ | ||
| 424 | for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) { | ||
| 425 | ret = modeset_init_intf(mdp5_kms, i); | ||
| 426 | if (ret) | ||
| 427 | goto fail; | ||
| 428 | } | ||
| 429 | |||
| 430 | /* | ||
| 431 | * Ideally we should have fewer encoders (set up by parsing | ||
| 432 | * the MDP5 interfaces) than the number of layer mixers present in HW, | ||
| 433 | * but let's be safe here anyway | ||
| 434 | */ | ||
| 435 | num_crtcs = min(priv->num_encoders, mdp5_cfg->lm.count); | ||
| 436 | |||
| 437 | /* | ||
| 438 | * Construct planes equaling the number of hw pipes, and CRTCs for the | ||
| 439 | * N encoders set up by the driver. The first N planes become primary | ||
| 427 | * planes for the CRTCs, with the remainder as overlay planes: | 440 | * planes for the CRTCs, with the remainder as overlay planes: |
| 428 | */ | 441 | */ |
| 429 | for (i = 0; i < mdp5_kms->num_hwpipes; i++) { | 442 | for (i = 0; i < mdp5_kms->num_hwpipes; i++) { |
| 430 | bool primary = i < mdp5_cfg->lm.count; | 443 | struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i]; |
| 431 | struct drm_plane *plane; | 444 | struct drm_plane *plane; |
| 432 | struct drm_crtc *crtc; | 445 | enum drm_plane_type type; |
| 433 | 446 | ||
| 434 | plane = mdp5_plane_init(dev, primary); | 447 | if (i < num_crtcs) |
| 448 | type = DRM_PLANE_TYPE_PRIMARY; | ||
| 449 | else if (hwpipe->caps & MDP_PIPE_CAP_CURSOR) | ||
| 450 | type = DRM_PLANE_TYPE_CURSOR; | ||
| 451 | else | ||
| 452 | type = DRM_PLANE_TYPE_OVERLAY; | ||
| 453 | |||
| 454 | plane = mdp5_plane_init(dev, type); | ||
| 435 | if (IS_ERR(plane)) { | 455 | if (IS_ERR(plane)) { |
| 436 | ret = PTR_ERR(plane); | 456 | ret = PTR_ERR(plane); |
| 437 | dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret); | 457 | dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret); |
| @@ -439,10 +459,16 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) | |||
| 439 | } | 459 | } |
| 440 | priv->planes[priv->num_planes++] = plane; | 460 | priv->planes[priv->num_planes++] = plane; |
| 441 | 461 | ||
| 442 | if (!primary) | 462 | if (type == DRM_PLANE_TYPE_PRIMARY) |
| 443 | continue; | 463 | primary[pi++] = plane; |
| 464 | if (type == DRM_PLANE_TYPE_CURSOR) | ||
| 465 | cursor[ci++] = plane; | ||
| 466 | } | ||
| 467 | |||
| 468 | for (i = 0; i < num_crtcs; i++) { | ||
| 469 | struct drm_crtc *crtc; | ||
| 444 | 470 | ||
| 445 | crtc = mdp5_crtc_init(dev, plane, i); | 471 | crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i); |
| 446 | if (IS_ERR(crtc)) { | 472 | if (IS_ERR(crtc)) { |
| 447 | ret = PTR_ERR(crtc); | 473 | ret = PTR_ERR(crtc); |
| 448 | dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret); | 474 | dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret); |
| @@ -451,13 +477,14 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) | |||
| 451 | priv->crtcs[priv->num_crtcs++] = crtc; | 477 | priv->crtcs[priv->num_crtcs++] = crtc; |
| 452 | } | 478 | } |
| 453 | 479 | ||
| 454 | /* Construct encoders and modeset initialize connector devices | 480 | /* |
| 455 | * for each external display interface. | 481 | * Now that we know the number of crtcs we've created, set the possible |
| 482 | * crtcs for the encoders | ||
| 456 | */ | 483 | */ |
| 457 | for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) { | 484 | for (i = 0; i < priv->num_encoders; i++) { |
| 458 | ret = modeset_init_intf(mdp5_kms, i); | 485 | struct drm_encoder *encoder = priv->encoders[i]; |
| 459 | if (ret) | 486 | |
| 460 | goto fail; | 487 | encoder->possible_crtcs = (1 << priv->num_crtcs) - 1; |
| 461 | } | 488 | } |
| 462 | 489 | ||
| 463 | return 0; | 490 | return 0; |
| @@ -773,6 +800,9 @@ static int hwpipe_init(struct mdp5_kms *mdp5_kms) | |||
| 773 | static const enum mdp5_pipe dma_planes[] = { | 800 | static const enum mdp5_pipe dma_planes[] = { |
| 774 | SSPP_DMA0, SSPP_DMA1, | 801 | SSPP_DMA0, SSPP_DMA1, |
| 775 | }; | 802 | }; |
| 803 | static const enum mdp5_pipe cursor_planes[] = { | ||
| 804 | SSPP_CURSOR0, SSPP_CURSOR1, | ||
| 805 | }; | ||
| 776 | const struct mdp5_cfg_hw *hw_cfg; | 806 | const struct mdp5_cfg_hw *hw_cfg; |
| 777 | int ret; | 807 | int ret; |
| 778 | 808 | ||
| @@ -796,6 +826,13 @@ static int hwpipe_init(struct mdp5_kms *mdp5_kms) | |||
| 796 | if (ret) | 826 | if (ret) |
| 797 | return ret; | 827 | return ret; |
| 798 | 828 | ||
| 829 | /* Construct cursor pipes: */ | ||
| 830 | ret = construct_pipes(mdp5_kms, hw_cfg->pipe_cursor.count, | ||
| 831 | cursor_planes, hw_cfg->pipe_cursor.base, | ||
| 832 | hw_cfg->pipe_cursor.caps); | ||
| 833 | if (ret) | ||
| 834 | return ret; | ||
| 835 | |||
| 799 | return 0; | 836 | return 0; |
| 800 | } | 837 | } |
| 801 | 838 | ||
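The modeset_init() rework above decouples plane, CRTC and encoder setup: interfaces are probed first, the CRTC count becomes the minimum of the encoder and layer-mixer counts, each hw pipe is classified as a primary, cursor or overlay plane, and encoder->possible_crtcs is filled in only once the final CRTC count is known. A minimal user-space sketch of that classification and bitmask logic, with made-up pipe counts and a constant that only mirrors MDP_PIPE_CAP_CURSOR (not the driver code):

    #include <stdio.h>

    #define PIPE_CAP_CURSOR (1u << 6)        /* mirrors MDP_PIPE_CAP_CURSOR */

    enum plane_type { PRIMARY, CURSOR, OVERLAY };

    /* The first num_crtcs pipes become primary planes, cursor-capable
     * pipes after that become cursor planes, the rest are overlays. */
    static enum plane_type classify(int idx, int num_crtcs, unsigned int caps)
    {
        if (idx < num_crtcs)
            return PRIMARY;
        if (caps & PIPE_CAP_CURSOR)
            return CURSOR;
        return OVERLAY;
    }

    int main(void)
    {
        /* hypothetical layout: 6 pipes, 2 CRTCs, pipes 4-5 cursor-capable */
        unsigned int caps[6] = { 0, 0, 0, 0, PIPE_CAP_CURSOR, PIPE_CAP_CURSOR };
        int num_crtcs = 2, i;
        const char *names[] = { "primary", "cursor", "overlay" };

        for (i = 0; i < 6; i++)
            printf("pipe %d -> %s\n", i, names[classify(i, num_crtcs, caps[i])]);

        /* one bit per CRTC: every encoder may drive any CRTC */
        printf("possible_crtcs = 0x%x\n", (1u << num_crtcs) - 1);
        return 0;
    }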
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index cdfc63d90c7b..9de471191eba 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | |||
| @@ -126,6 +126,17 @@ struct mdp5_interface { | |||
| 126 | enum mdp5_intf_mode mode; | 126 | enum mdp5_intf_mode mode; |
| 127 | }; | 127 | }; |
| 128 | 128 | ||
| 129 | struct mdp5_encoder { | ||
| 130 | struct drm_encoder base; | ||
| 131 | struct mdp5_interface intf; | ||
| 132 | spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */ | ||
| 133 | bool enabled; | ||
| 134 | uint32_t bsc; | ||
| 135 | |||
| 136 | struct mdp5_ctl *ctl; | ||
| 137 | }; | ||
| 138 | #define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base) | ||
| 139 | |||
| 129 | static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data) | 140 | static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data) |
| 130 | { | 141 | { |
| 131 | msm_writel(data, mdp5_kms->mmio + reg); | 142 | msm_writel(data, mdp5_kms->mmio + reg); |
| @@ -156,6 +167,7 @@ static inline const char *pipe2name(enum mdp5_pipe pipe) | |||
| 156 | NAME(RGB0), NAME(RGB1), NAME(RGB2), | 167 | NAME(RGB0), NAME(RGB1), NAME(RGB2), |
| 157 | NAME(DMA0), NAME(DMA1), | 168 | NAME(DMA0), NAME(DMA1), |
| 158 | NAME(VIG3), NAME(RGB3), | 169 | NAME(VIG3), NAME(RGB3), |
| 170 | NAME(CURSOR0), NAME(CURSOR1), | ||
| 159 | #undef NAME | 171 | #undef NAME |
| 160 | }; | 172 | }; |
| 161 | return names[pipe]; | 173 | return names[pipe]; |
| @@ -231,8 +243,10 @@ void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); | |||
| 231 | 243 | ||
| 232 | uint32_t mdp5_plane_get_flush(struct drm_plane *plane); | 244 | uint32_t mdp5_plane_get_flush(struct drm_plane *plane); |
| 233 | enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); | 245 | enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); |
| 234 | struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary); | 246 | struct drm_plane *mdp5_plane_init(struct drm_device *dev, |
| 247 | enum drm_plane_type type); | ||
| 235 | 248 | ||
| 249 | struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc); | ||
| 236 | uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc); | 250 | uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc); |
| 237 | 251 | ||
| 238 | int mdp5_crtc_get_lm(struct drm_crtc *crtc); | 252 | int mdp5_crtc_get_lm(struct drm_crtc *crtc); |
| @@ -240,25 +254,36 @@ void mdp5_crtc_set_pipeline(struct drm_crtc *crtc, | |||
| 240 | struct mdp5_interface *intf, struct mdp5_ctl *ctl); | 254 | struct mdp5_interface *intf, struct mdp5_ctl *ctl); |
| 241 | void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc); | 255 | void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc); |
| 242 | struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, | 256 | struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, |
| 243 | struct drm_plane *plane, int id); | 257 | struct drm_plane *plane, |
| 258 | struct drm_plane *cursor_plane, int id); | ||
| 244 | 259 | ||
| 245 | struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, | 260 | struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, |
| 246 | struct mdp5_interface *intf, struct mdp5_ctl *ctl); | 261 | struct mdp5_interface *intf, struct mdp5_ctl *ctl); |
| 247 | int mdp5_encoder_set_split_display(struct drm_encoder *encoder, | 262 | int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder, |
| 248 | struct drm_encoder *slave_encoder); | 263 | struct drm_encoder *slave_encoder); |
| 264 | void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode); | ||
| 249 | int mdp5_encoder_get_linecount(struct drm_encoder *encoder); | 265 | int mdp5_encoder_get_linecount(struct drm_encoder *encoder); |
| 250 | u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder); | 266 | u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder); |
| 251 | 267 | ||
| 252 | #ifdef CONFIG_DRM_MSM_DSI | 268 | #ifdef CONFIG_DRM_MSM_DSI |
| 253 | struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev, | 269 | void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, |
| 254 | struct mdp5_interface *intf, struct mdp5_ctl *ctl); | 270 | struct drm_display_mode *mode, |
| 271 | struct drm_display_mode *adjusted_mode); | ||
| 272 | void mdp5_cmd_encoder_disable(struct drm_encoder *encoder); | ||
| 273 | void mdp5_cmd_encoder_enable(struct drm_encoder *encoder); | ||
| 255 | int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, | 274 | int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, |
| 256 | struct drm_encoder *slave_encoder); | 275 | struct drm_encoder *slave_encoder); |
| 257 | #else | 276 | #else |
| 258 | static inline struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev, | 277 | static inline void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, |
| 259 | struct mdp5_interface *intf, struct mdp5_ctl *ctl) | 278 | struct drm_display_mode *mode, |
| 279 | struct drm_display_mode *adjusted_mode) | ||
| 280 | { | ||
| 281 | } | ||
| 282 | static inline void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) | ||
| 283 | { | ||
| 284 | } | ||
| 285 | static inline void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) | ||
| 260 | { | 286 | { |
| 261 | return ERR_PTR(-EINVAL); | ||
| 262 | } | 287 | } |
| 263 | static inline int mdp5_cmd_encoder_set_split_display( | 288 | static inline int mdp5_cmd_encoder_set_split_display( |
| 264 | struct drm_encoder *encoder, struct drm_encoder *slave_encoder) | 289 | struct drm_encoder *encoder, struct drm_encoder *slave_encoder) |
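The new struct mdp5_encoder moved into the header above wraps a drm_encoder and gets back to the wrapper via the to_mdp5_encoder() container_of macro. A self-contained illustration of that pattern, using toy stand-ins for the kernel types:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct encoder { int id; };             /* stands in for drm_encoder */

    struct mdp5_encoder {
        struct encoder base;
        int intf_num;                       /* stands in for mdp5_interface */
    };
    #define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)

    int main(void)
    {
        struct mdp5_encoder enc = { .base = { .id = 1 }, .intf_num = 3 };
        struct encoder *e = &enc.base;      /* what the core hands around */

        /* walk from the embedded member back to the enclosing wrapper */
        printf("intf_num = %d\n", to_mdp5_encoder(e)->intf_num);
        return 0;
    }

The cast is safe because base is embedded by value, so the wrapper always sits at a fixed offset below the drm_encoder pointer the core passes back.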
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c index 1ae9dc8d260d..35c4dabb0c0c 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c | |||
| @@ -53,6 +53,14 @@ struct mdp5_hw_pipe *mdp5_pipe_assign(struct drm_atomic_state *s, | |||
| 53 | if (caps & ~cur->caps) | 53 | if (caps & ~cur->caps) |
| 54 | continue; | 54 | continue; |
| 55 | 55 | ||
| 56 | /* | ||
| 57 | * don't assign a cursor pipe to a plane that isn't going to | ||
| 58 | * be used as a cursor | ||
| 59 | */ | ||
| 60 | if (cur->caps & MDP_PIPE_CAP_CURSOR && | ||
| 61 | plane->type != DRM_PLANE_TYPE_CURSOR) | ||
| 62 | continue; | ||
| 63 | |||
| 56 | /* possible candidate, take the one with the | 64 | /* possible candidate, take the one with the |
| 57 | * fewest unneeded caps bits set: | 65 | * fewest unneeded caps bits set: |
| 58 | */ | 66 | */ |
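The hunk above teaches mdp5_pipe_assign() to skip cursor-capable pipes when the requesting plane is not a cursor, on top of the existing rule of picking the candidate with the fewest unneeded capability bits. A compilable sketch of that selection loop, with hypothetical cap bit positions and pipe names:

    #include <stdio.h>

    #define CAP_SCALE  (1u << 0)    /* bit positions differ from the real defines */
    #define CAP_CSC    (1u << 1)
    #define CAP_CURSOR (1u << 2)

    struct hw_pipe { const char *name; unsigned int caps; };

    static int popcount(unsigned int v)
    {
        int n = 0;
        for (; v; v &= v - 1)
            n++;
        return n;
    }

    /* pick the pipe that satisfies 'caps' with the fewest unneeded
     * capability bits, never wasting a cursor pipe on a non-cursor plane */
    static const struct hw_pipe *assign(const struct hw_pipe *pipes, int n,
                                        unsigned int caps, int is_cursor)
    {
        const struct hw_pipe *best = NULL;
        int i;

        for (i = 0; i < n; i++) {
            if (caps & ~pipes[i].caps)
                continue;               /* missing a required cap */
            if ((pipes[i].caps & CAP_CURSOR) && !is_cursor)
                continue;               /* reserve cursor pipes */
            if (!best || popcount(pipes[i].caps & ~caps) <
                         popcount(best->caps & ~caps))
                best = &pipes[i];
        }
        return best;
    }

    int main(void)
    {
        struct hw_pipe pipes[] = {
            { "VIG0",    CAP_SCALE | CAP_CSC },
            { "DMA0",    0 },
            { "CURSOR0", CAP_CURSOR },
        };
        const struct hw_pipe *p = assign(pipes, 3, 0, 0);

        /* DMA0 wins: it satisfies the request with zero spare caps */
        printf("plain plane gets %s\n", p ? p->name : "(none)");
        return 0;
    }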
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index b9fb111d3428..0ffb8affef35 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | |||
| @@ -29,6 +29,11 @@ struct mdp5_plane { | |||
| 29 | 29 | ||
| 30 | static int mdp5_plane_mode_set(struct drm_plane *plane, | 30 | static int mdp5_plane_mode_set(struct drm_plane *plane, |
| 31 | struct drm_crtc *crtc, struct drm_framebuffer *fb, | 31 | struct drm_crtc *crtc, struct drm_framebuffer *fb, |
| 32 | struct drm_rect *src, struct drm_rect *dest); | ||
| 33 | |||
| 34 | static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane, | ||
| 35 | struct drm_crtc *crtc, | ||
| 36 | struct drm_framebuffer *fb, | ||
| 32 | int crtc_x, int crtc_y, | 37 | int crtc_x, int crtc_y, |
| 33 | unsigned int crtc_w, unsigned int crtc_h, | 38 | unsigned int crtc_w, unsigned int crtc_h, |
| 34 | uint32_t src_x, uint32_t src_y, | 39 | uint32_t src_x, uint32_t src_y, |
| @@ -45,7 +50,7 @@ static struct mdp5_kms *get_kms(struct drm_plane *plane) | |||
| 45 | 50 | ||
| 46 | static bool plane_enabled(struct drm_plane_state *state) | 51 | static bool plane_enabled(struct drm_plane_state *state) |
| 47 | { | 52 | { |
| 48 | return state->fb && state->crtc; | 53 | return state->visible; |
| 49 | } | 54 | } |
| 50 | 55 | ||
| 51 | static void mdp5_plane_destroy(struct drm_plane *plane) | 56 | static void mdp5_plane_destroy(struct drm_plane *plane) |
| @@ -246,6 +251,19 @@ static const struct drm_plane_funcs mdp5_plane_funcs = { | |||
| 246 | .atomic_print_state = mdp5_plane_atomic_print_state, | 251 | .atomic_print_state = mdp5_plane_atomic_print_state, |
| 247 | }; | 252 | }; |
| 248 | 253 | ||
| 254 | static const struct drm_plane_funcs mdp5_cursor_plane_funcs = { | ||
| 255 | .update_plane = mdp5_update_cursor_plane_legacy, | ||
| 256 | .disable_plane = drm_atomic_helper_disable_plane, | ||
| 257 | .destroy = mdp5_plane_destroy, | ||
| 258 | .set_property = drm_atomic_helper_plane_set_property, | ||
| 259 | .atomic_set_property = mdp5_plane_atomic_set_property, | ||
| 260 | .atomic_get_property = mdp5_plane_atomic_get_property, | ||
| 261 | .reset = mdp5_plane_reset, | ||
| 262 | .atomic_duplicate_state = mdp5_plane_duplicate_state, | ||
| 263 | .atomic_destroy_state = mdp5_plane_destroy_state, | ||
| 264 | .atomic_print_state = mdp5_plane_atomic_print_state, | ||
| 265 | }; | ||
| 266 | |||
| 249 | static int mdp5_plane_prepare_fb(struct drm_plane *plane, | 267 | static int mdp5_plane_prepare_fb(struct drm_plane *plane, |
| 250 | struct drm_plane_state *new_state) | 268 | struct drm_plane_state *new_state) |
| 251 | { | 269 | { |
| @@ -272,15 +290,20 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane, | |||
| 272 | msm_framebuffer_cleanup(fb, mdp5_kms->id); | 290 | msm_framebuffer_cleanup(fb, mdp5_kms->id); |
| 273 | } | 291 | } |
| 274 | 292 | ||
| 275 | static int mdp5_plane_atomic_check(struct drm_plane *plane, | 293 | #define FRAC_16_16(mult, div) (((mult) << 16) / (div)) |
| 276 | struct drm_plane_state *state) | 294 | static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state, |
| 295 | struct drm_plane_state *state) | ||
| 277 | { | 296 | { |
| 278 | struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state); | 297 | struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state); |
| 298 | struct drm_plane *plane = state->plane; | ||
| 279 | struct drm_plane_state *old_state = plane->state; | 299 | struct drm_plane_state *old_state = plane->state; |
| 280 | struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg); | 300 | struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg); |
| 281 | bool new_hwpipe = false; | 301 | bool new_hwpipe = false; |
| 282 | uint32_t max_width, max_height; | 302 | uint32_t max_width, max_height; |
| 283 | uint32_t caps = 0; | 303 | uint32_t caps = 0; |
| 304 | struct drm_rect clip; | ||
| 305 | int min_scale, max_scale; | ||
| 306 | int ret; | ||
| 284 | 307 | ||
| 285 | DBG("%s: check (%d -> %d)", plane->name, | 308 | DBG("%s: check (%d -> %d)", plane->name, |
| 286 | plane_enabled(old_state), plane_enabled(state)); | 309 | plane_enabled(old_state), plane_enabled(state)); |
| @@ -296,6 +319,18 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, | |||
| 296 | return -ERANGE; | 319 | return -ERANGE; |
| 297 | } | 320 | } |
| 298 | 321 | ||
| 322 | clip.x1 = 0; | ||
| 323 | clip.y1 = 0; | ||
| 324 | clip.x2 = crtc_state->adjusted_mode.hdisplay; | ||
| 325 | clip.y2 = crtc_state->adjusted_mode.vdisplay; | ||
| 326 | min_scale = FRAC_16_16(1, 8); | ||
| 327 | max_scale = FRAC_16_16(8, 1); | ||
| 328 | |||
| 329 | ret = drm_plane_helper_check_state(state, &clip, min_scale, | ||
| 330 | max_scale, true, true); | ||
| 331 | if (ret) | ||
| 332 | return ret; | ||
| 333 | |||
| 299 | if (plane_enabled(state)) { | 334 | if (plane_enabled(state)) { |
| 300 | unsigned int rotation; | 335 | unsigned int rotation; |
| 301 | const struct mdp_format *format; | 336 | const struct mdp_format *format; |
| @@ -321,6 +356,9 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, | |||
| 321 | if (rotation & DRM_REFLECT_Y) | 356 | if (rotation & DRM_REFLECT_Y) |
| 322 | caps |= MDP_PIPE_CAP_VFLIP; | 357 | caps |= MDP_PIPE_CAP_VFLIP; |
| 323 | 358 | ||
| 359 | if (plane->type == DRM_PLANE_TYPE_CURSOR) | ||
| 360 | caps |= MDP_PIPE_CAP_CURSOR; | ||
| 361 | |||
| 324 | /* (re)allocate hw pipe if we don't have one or caps-mismatch: */ | 362 | /* (re)allocate hw pipe if we don't have one or caps-mismatch: */ |
| 325 | if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps)) | 363 | if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps)) |
| 326 | new_hwpipe = true; | 364 | new_hwpipe = true; |
| @@ -356,6 +394,23 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, | |||
| 356 | return 0; | 394 | return 0; |
| 357 | } | 395 | } |
| 358 | 396 | ||
| 397 | static int mdp5_plane_atomic_check(struct drm_plane *plane, | ||
| 398 | struct drm_plane_state *state) | ||
| 399 | { | ||
| 400 | struct drm_crtc *crtc; | ||
| 401 | struct drm_crtc_state *crtc_state; | ||
| 402 | |||
| 403 | crtc = state->crtc ? state->crtc : plane->state->crtc; | ||
| 404 | if (!crtc) | ||
| 405 | return 0; | ||
| 406 | |||
| 407 | crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc); | ||
| 408 | if (WARN_ON(!crtc_state)) | ||
| 409 | return -EINVAL; | ||
| 410 | |||
| 411 | return mdp5_plane_atomic_check_with_state(crtc_state, state); | ||
| 412 | } | ||
| 413 | |||
| 359 | static void mdp5_plane_atomic_update(struct drm_plane *plane, | 414 | static void mdp5_plane_atomic_update(struct drm_plane *plane, |
| 360 | struct drm_plane_state *old_state) | 415 | struct drm_plane_state *old_state) |
| 361 | { | 416 | { |
| @@ -368,10 +423,7 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane, | |||
| 368 | 423 | ||
| 369 | ret = mdp5_plane_mode_set(plane, | 424 | ret = mdp5_plane_mode_set(plane, |
| 370 | state->crtc, state->fb, | 425 | state->crtc, state->fb, |
| 371 | state->crtc_x, state->crtc_y, | 426 | &state->src, &state->dst); |
| 372 | state->crtc_w, state->crtc_h, | ||
| 373 | state->src_x, state->src_y, | ||
| 374 | state->src_w, state->src_h); | ||
| 375 | /* atomic_check should have ensured that this doesn't fail */ | 427 | /* atomic_check should have ensured that this doesn't fail */ |
| 376 | WARN_ON(ret < 0); | 428 | WARN_ON(ret < 0); |
| 377 | } | 429 | } |
| @@ -664,10 +716,7 @@ static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe, | |||
| 664 | 716 | ||
| 665 | static int mdp5_plane_mode_set(struct drm_plane *plane, | 717 | static int mdp5_plane_mode_set(struct drm_plane *plane, |
| 666 | struct drm_crtc *crtc, struct drm_framebuffer *fb, | 718 | struct drm_crtc *crtc, struct drm_framebuffer *fb, |
| 667 | int crtc_x, int crtc_y, | 719 | struct drm_rect *src, struct drm_rect *dest) |
| 668 | unsigned int crtc_w, unsigned int crtc_h, | ||
| 669 | uint32_t src_x, uint32_t src_y, | ||
| 670 | uint32_t src_w, uint32_t src_h) | ||
| 671 | { | 720 | { |
| 672 | struct drm_plane_state *pstate = plane->state; | 721 | struct drm_plane_state *pstate = plane->state; |
| 673 | struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe; | 722 | struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe; |
| @@ -683,6 +732,10 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, | |||
| 683 | uint32_t pix_format; | 732 | uint32_t pix_format; |
| 684 | unsigned int rotation; | 733 | unsigned int rotation; |
| 685 | bool vflip, hflip; | 734 | bool vflip, hflip; |
| 735 | int crtc_x, crtc_y; | ||
| 736 | unsigned int crtc_w, crtc_h; | ||
| 737 | uint32_t src_x, src_y; | ||
| 738 | uint32_t src_w, src_h; | ||
| 686 | unsigned long flags; | 739 | unsigned long flags; |
| 687 | int ret; | 740 | int ret; |
| 688 | 741 | ||
| @@ -695,6 +748,16 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, | |||
| 695 | format = to_mdp_format(msm_framebuffer_format(fb)); | 748 | format = to_mdp_format(msm_framebuffer_format(fb)); |
| 696 | pix_format = format->base.pixel_format; | 749 | pix_format = format->base.pixel_format; |
| 697 | 750 | ||
| 751 | src_x = src->x1; | ||
| 752 | src_y = src->y1; | ||
| 753 | src_w = drm_rect_width(src); | ||
| 754 | src_h = drm_rect_height(src); | ||
| 755 | |||
| 756 | crtc_x = dest->x1; | ||
| 757 | crtc_y = dest->y1; | ||
| 758 | crtc_w = drm_rect_width(dest); | ||
| 759 | crtc_h = drm_rect_height(dest); | ||
| 760 | |||
| 698 | /* src values are in Q16 fixed point, convert to integer: */ | 761 | /* src values are in Q16 fixed point, convert to integer: */ |
| 699 | src_x = src_x >> 16; | 762 | src_x = src_x >> 16; |
| 700 | src_y = src_y >> 16; | 763 | src_y = src_y >> 16; |
| @@ -818,12 +881,88 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, | |||
| 818 | return ret; | 881 | return ret; |
| 819 | } | 882 | } |
| 820 | 883 | ||
| 884 | static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane, | ||
| 885 | struct drm_crtc *crtc, struct drm_framebuffer *fb, | ||
| 886 | int crtc_x, int crtc_y, | ||
| 887 | unsigned int crtc_w, unsigned int crtc_h, | ||
| 888 | uint32_t src_x, uint32_t src_y, | ||
| 889 | uint32_t src_w, uint32_t src_h) | ||
| 890 | { | ||
| 891 | struct drm_plane_state *plane_state, *new_plane_state; | ||
| 892 | struct mdp5_plane_state *mdp5_pstate; | ||
| 893 | struct drm_crtc_state *crtc_state = crtc->state; | ||
| 894 | int ret; | ||
| 895 | |||
| 896 | if (!crtc_state->active || drm_atomic_crtc_needs_modeset(crtc_state)) | ||
| 897 | goto slow; | ||
| 898 | |||
| 899 | plane_state = plane->state; | ||
| 900 | mdp5_pstate = to_mdp5_plane_state(plane_state); | ||
| 901 | |||
| 902 | /* don't use fast path if we don't have a hwpipe allocated yet */ | ||
| 903 | if (!mdp5_pstate->hwpipe) | ||
| 904 | goto slow; | ||
| 905 | |||
| 906 | /* only allow changing the position (crtc x/y or src x/y) in the fast path */ | ||
| 907 | if (plane_state->crtc != crtc || | ||
| 908 | plane_state->src_w != src_w || | ||
| 909 | plane_state->src_h != src_h || | ||
| 910 | plane_state->crtc_w != crtc_w || | ||
| 911 | plane_state->crtc_h != crtc_h || | ||
| 912 | !plane_state->fb || | ||
| 913 | plane_state->fb != fb) | ||
| 914 | goto slow; | ||
| 915 | |||
| 916 | new_plane_state = mdp5_plane_duplicate_state(plane); | ||
| 917 | if (!new_plane_state) | ||
| 918 | return -ENOMEM; | ||
| 919 | |||
| 920 | new_plane_state->src_x = src_x; | ||
| 921 | new_plane_state->src_y = src_y; | ||
| 922 | new_plane_state->src_w = src_w; | ||
| 923 | new_plane_state->src_h = src_h; | ||
| 924 | new_plane_state->crtc_x = crtc_x; | ||
| 925 | new_plane_state->crtc_y = crtc_y; | ||
| 926 | new_plane_state->crtc_w = crtc_w; | ||
| 927 | new_plane_state->crtc_h = crtc_h; | ||
| 928 | |||
| 929 | ret = mdp5_plane_atomic_check_with_state(crtc_state, new_plane_state); | ||
| 930 | if (ret) | ||
| 931 | goto slow_free; | ||
| 932 | |||
| 933 | if (new_plane_state->visible) { | ||
| 934 | struct mdp5_ctl *ctl; | ||
| 935 | |||
| 936 | ret = mdp5_plane_mode_set(plane, crtc, fb, | ||
| 937 | &new_plane_state->src, | ||
| 938 | &new_plane_state->dst); | ||
| 939 | WARN_ON(ret < 0); | ||
| 940 | |||
| 941 | ctl = mdp5_crtc_get_ctl(crtc); | ||
| 942 | |||
| 943 | mdp5_ctl_commit(ctl, mdp5_plane_get_flush(plane)); | ||
| 944 | } | ||
| 945 | |||
| 946 | *to_mdp5_plane_state(plane_state) = | ||
| 947 | *to_mdp5_plane_state(new_plane_state); | ||
| 948 | |||
| 949 | mdp5_plane_destroy_state(plane, new_plane_state); | ||
| 950 | |||
| 951 | return 0; | ||
| 952 | slow_free: | ||
| 953 | mdp5_plane_destroy_state(plane, new_plane_state); | ||
| 954 | slow: | ||
| 955 | return drm_atomic_helper_update_plane(plane, crtc, fb, | ||
| 956 | crtc_x, crtc_y, crtc_w, crtc_h, | ||
| 957 | src_x, src_y, src_w, src_h); | ||
| 958 | } | ||
| 959 | |||
| 821 | enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane) | 960 | enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane) |
| 822 | { | 961 | { |
| 823 | struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); | 962 | struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); |
| 824 | 963 | ||
| 825 | if (WARN_ON(!pstate->hwpipe)) | 964 | if (WARN_ON(!pstate->hwpipe)) |
| 826 | return 0; | 965 | return SSPP_NONE; |
| 827 | 966 | ||
| 828 | return pstate->hwpipe->pipe; | 967 | return pstate->hwpipe->pipe; |
| 829 | } | 968 | } |
| @@ -839,12 +978,12 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane) | |||
| 839 | } | 978 | } |
| 840 | 979 | ||
| 841 | /* initialize plane */ | 980 | /* initialize plane */ |
| 842 | struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary) | 981 | struct drm_plane *mdp5_plane_init(struct drm_device *dev, |
| 982 | enum drm_plane_type type) | ||
| 843 | { | 983 | { |
| 844 | struct drm_plane *plane = NULL; | 984 | struct drm_plane *plane = NULL; |
| 845 | struct mdp5_plane *mdp5_plane; | 985 | struct mdp5_plane *mdp5_plane; |
| 846 | int ret; | 986 | int ret; |
| 847 | enum drm_plane_type type; | ||
| 848 | 987 | ||
| 849 | mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL); | 988 | mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL); |
| 850 | if (!mdp5_plane) { | 989 | if (!mdp5_plane) { |
| @@ -857,10 +996,16 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary) | |||
| 857 | mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats, | 996 | mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats, |
| 858 | ARRAY_SIZE(mdp5_plane->formats), false); | 997 | ARRAY_SIZE(mdp5_plane->formats), false); |
| 859 | 998 | ||
| 860 | type = primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; | 999 | if (type == DRM_PLANE_TYPE_CURSOR) |
| 861 | ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs, | 1000 | ret = drm_universal_plane_init(dev, plane, 0xff, |
| 862 | mdp5_plane->formats, mdp5_plane->nformats, | 1001 | &mdp5_cursor_plane_funcs, |
| 863 | type, NULL); | 1002 | mdp5_plane->formats, mdp5_plane->nformats, |
| 1003 | type, NULL); | ||
| 1004 | else | ||
| 1005 | ret = drm_universal_plane_init(dev, plane, 0xff, | ||
| 1006 | &mdp5_plane_funcs, | ||
| 1007 | mdp5_plane->formats, mdp5_plane->nformats, | ||
| 1008 | type, NULL); | ||
| 864 | if (ret) | 1009 | if (ret) |
| 865 | goto fail; | 1010 | goto fail; |
| 866 | 1011 | ||
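The plane rework passes clipped drm_rect source and destination rectangles around instead of eight scalars; source coordinates stay in Q16.16 fixed point, the FRAC_16_16() macro expresses the 1/8x to 8x scaling bounds handed to drm_plane_helper_check_state(), and mdp5_plane_mode_set() shifts right by 16 to recover integer pixels. A small demonstration of the fixed-point arithmetic, using an assumed 1920-wide source scanned out at 960 pixels:

    #include <stdio.h>
    #include <stdint.h>

    #define FRAC_16_16(mult, div) (((mult) << 16) / (div))

    int main(void)
    {
        uint32_t src_w = 1920u << 16;    /* Q16.16, as in drm_plane_state */
        uint32_t crtc_w = 960;

        /* scale factor in Q16.16: a 2x downscale here */
        uint32_t scale = (uint32_t)((uint64_t)src_w / crtc_w);

        printf("min=%d max=%d scale=0x%x\n",
               FRAC_16_16(1, 8), FRAC_16_16(8, 1), scale);

        /* converting back to integer pixels, as mdp5_plane_mode_set does */
        printf("src_w = %u px\n", src_w >> 16);
        return 0;
    }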
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h index 303130320748..7574cdfef418 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h | |||
| @@ -112,6 +112,7 @@ const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format); | |||
| 112 | #define MDP_PIPE_CAP_CSC BIT(3) | 112 | #define MDP_PIPE_CAP_CSC BIT(3) |
| 113 | #define MDP_PIPE_CAP_DECIMATION BIT(4) | 113 | #define MDP_PIPE_CAP_DECIMATION BIT(4) |
| 114 | #define MDP_PIPE_CAP_SW_PIX_EXT BIT(5) | 114 | #define MDP_PIPE_CAP_SW_PIX_EXT BIT(5) |
| 115 | #define MDP_PIPE_CAP_CURSOR BIT(6) | ||
| 115 | 116 | ||
| 116 | static inline bool pipe_supports_yuv(uint32_t pipe_caps) | 117 | static inline bool pipe_supports_yuv(uint32_t pipe_caps) |
| 117 | { | 118 | { |
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 30b5d23e53b4..9633a68b14d7 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c | |||
| @@ -93,11 +93,6 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev, | |||
| 93 | if (!crtc->state->enable) | 93 | if (!crtc->state->enable) |
| 94 | continue; | 94 | continue; |
| 95 | 95 | ||
| 96 | /* Legacy cursor ioctls are completely unsynced, and userspace | ||
| 97 | * relies on that (by doing tons of cursor updates). */ | ||
| 98 | if (old_state->legacy_cursor_update) | ||
| 99 | continue; | ||
| 100 | |||
| 101 | kms->funcs->wait_for_crtc_commit_done(kms, crtc); | 96 | kms->funcs->wait_for_crtc_commit_done(kms, crtc); |
| 102 | } | 97 | } |
| 103 | } | 98 | } |
| @@ -151,20 +146,29 @@ static void commit_worker(struct work_struct *work) | |||
| 151 | complete_commit(container_of(work, struct msm_commit, work), true); | 146 | complete_commit(container_of(work, struct msm_commit, work), true); |
| 152 | } | 147 | } |
| 153 | 148 | ||
| 149 | /* | ||
| 150 | * This function is identical to drm_atomic_helper_check, but we keep it | ||
| 151 | * because we might eventually need a more fine-grained check | ||
| 152 | * sequence without using the atomic helpers. | ||
| 153 | * | ||
| 154 | * In the past, we first called drm_atomic_helper_check_planes, and then | ||
| 155 | * drm_atomic_helper_check_modeset. We needed this because the MDP5 plane's | ||
| 156 | * ->atomic_check could update ->mode_changed for pixel format changes. | ||
| 157 | * This, however, isn't needed now because if there is a pixel format change, | ||
| 158 | * we just assign a new hwpipe for it with a new SMP allocation. We might | ||
| 159 | * eventually hit a condition where we would need to do a full modeset if | ||
| 160 | * we run out of planes. There, we'd probably need to set mode_changed. | ||
| 161 | */ | ||
| 154 | int msm_atomic_check(struct drm_device *dev, | 162 | int msm_atomic_check(struct drm_device *dev, |
| 155 | struct drm_atomic_state *state) | 163 | struct drm_atomic_state *state) |
| 156 | { | 164 | { |
| 157 | int ret; | 165 | int ret; |
| 158 | 166 | ||
| 159 | /* | 167 | ret = drm_atomic_helper_check_modeset(dev, state); |
| 160 | * msm ->atomic_check can update ->mode_changed for pixel format | ||
| 161 | * changes, hence must be run before we check the modeset changes. | ||
| 162 | */ | ||
| 163 | ret = drm_atomic_helper_check_planes(dev, state); | ||
| 164 | if (ret) | 168 | if (ret) |
| 165 | return ret; | 169 | return ret; |
| 166 | 170 | ||
| 167 | ret = drm_atomic_helper_check_modeset(dev, state); | 171 | ret = drm_atomic_helper_check_planes(dev, state); |
| 168 | if (ret) | 172 | if (ret) |
| 169 | return ret; | 173 | return ret; |
| 170 | 174 | ||
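The comment block added to msm_atomic_check() explains why the driver used to run the plane checks before the modeset check: a plane's ->atomic_check could flip crtc_state->mode_changed, and only a later modeset pass would honor it. A toy sketch of that historical dependency, with stub check functions standing in for the DRM helpers:

    #include <stdio.h>

    struct crtc_state { int mode_changed; };

    /* hypothetical plane check: a pixel format change used to force
     * a modeset by flipping mode_changed (no longer needed in mdp5) */
    static int check_planes(struct crtc_state *cs, int fmt_changed)
    {
        if (fmt_changed)
            cs->mode_changed = 1;
        return 0;
    }

    static int check_modeset(const struct crtc_state *cs)
    {
        if (cs->mode_changed)
            printf("full modeset path taken\n");
        else
            printf("fast plane-only update\n");
        return 0;
    }

    int main(void)
    {
        struct crtc_state cs = { 0 };

        /* old msm order: planes first, so the flag is still honored */
        check_planes(&cs, 1);
        check_modeset(&cs);

        /* the helper order (modeset first) would have missed the flag,
         * which is why msm used to swap the two calls */
        return 0;
    }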
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 54207fe59307..cb47f4a14215 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c | |||
| @@ -91,6 +91,25 @@ module_param(dumpstate, bool, 0600); | |||
| 91 | * Util/helpers: | 91 | * Util/helpers: |
| 92 | */ | 92 | */ |
| 93 | 93 | ||
| 94 | struct clk *msm_clk_get(struct platform_device *pdev, const char *name) | ||
| 95 | { | ||
| 96 | struct clk *clk; | ||
| 97 | char name2[32]; | ||
| 98 | |||
| 99 | clk = devm_clk_get(&pdev->dev, name); | ||
| 100 | if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER) | ||
| 101 | return clk; | ||
| 102 | |||
| 103 | snprintf(name2, sizeof(name2), "%s_clk", name); | ||
| 104 | |||
| 105 | clk = devm_clk_get(&pdev->dev, name2); | ||
| 106 | if (!IS_ERR(clk)) | ||
| 107 | dev_warn(&pdev->dev, "Using legacy clk name binding. Use " | ||
| 108 | "\"%s\" instead of \"%s\"\n", name, name2); | ||
| 109 | |||
| 110 | return clk; | ||
| 111 | } | ||
| 112 | |||
| 94 | void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, | 113 | void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, |
| 95 | const char *dbgname) | 114 | const char *dbgname) |
| 96 | { | 115 | { |
| @@ -984,6 +1003,7 @@ static int add_display_components(struct device *dev, | |||
| 984 | * as components. | 1003 | * as components. |
| 985 | */ | 1004 | */ |
| 986 | static const struct of_device_id msm_gpu_match[] = { | 1005 | static const struct of_device_id msm_gpu_match[] = { |
| 1006 | { .compatible = "qcom,adreno" }, | ||
| 987 | { .compatible = "qcom,adreno-3xx" }, | 1007 | { .compatible = "qcom,adreno-3xx" }, |
| 988 | { .compatible = "qcom,kgsl-3d0" }, | 1008 | { .compatible = "qcom,kgsl-3d0" }, |
| 989 | { }, | 1009 | { }, |
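msm_clk_get() above first asks for the new DT binding name and, unless the first lookup deferred with -EPROBE_DEFER, retries with the legacy "<name>_clk" spelling while warning about the old binding. A user-space sketch of the same try-then-fallback shape, with lookup_clk() as a stand-in for devm_clk_get() in a world where only the legacy name exists:

    #include <stdio.h>
    #include <string.h>

    /* stand-in for devm_clk_get(): only the legacy "core_clk" is present */
    static int lookup_clk(const char *name)
    {
        return strcmp(name, "core_clk") == 0 ? 0 : -1;
    }

    /* try the new binding name first, then the legacy "<name>_clk" */
    static int get_clk(const char *name)
    {
        char legacy[32];

        if (lookup_clk(name) == 0)
            return 0;

        snprintf(legacy, sizeof(legacy), "%s_clk", name);
        if (lookup_clk(legacy) == 0) {
            fprintf(stderr, "warning: using legacy clk name \"%s\"\n",
                    legacy);
            return 0;
        }
        return -1;
    }

    int main(void)
    {
        printf("core: %s\n", get_clk("core") == 0 ? "found" : "missing");
        printf("iface: %s\n", get_clk("iface") == 0 ? "found" : "missing");
        return 0;
    }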
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index ed4dad3ca133..cdd7b2f8e977 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h | |||
| @@ -275,16 +275,11 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev, | |||
| 275 | struct drm_encoder *encoder); | 275 | struct drm_encoder *encoder); |
| 276 | 276 | ||
| 277 | struct msm_dsi; | 277 | struct msm_dsi; |
| 278 | enum msm_dsi_encoder_id { | ||
| 279 | MSM_DSI_VIDEO_ENCODER_ID = 0, | ||
| 280 | MSM_DSI_CMD_ENCODER_ID = 1, | ||
| 281 | MSM_DSI_ENCODER_NUM = 2 | ||
| 282 | }; | ||
| 283 | #ifdef CONFIG_DRM_MSM_DSI | 278 | #ifdef CONFIG_DRM_MSM_DSI |
| 284 | void __init msm_dsi_register(void); | 279 | void __init msm_dsi_register(void); |
| 285 | void __exit msm_dsi_unregister(void); | 280 | void __exit msm_dsi_unregister(void); |
| 286 | int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, | 281 | int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, |
| 287 | struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]); | 282 | struct drm_encoder *encoder); |
| 288 | #else | 283 | #else |
| 289 | static inline void __init msm_dsi_register(void) | 284 | static inline void __init msm_dsi_register(void) |
| 290 | { | 285 | { |
| @@ -293,8 +288,8 @@ static inline void __exit msm_dsi_unregister(void) | |||
| 293 | { | 288 | { |
| 294 | } | 289 | } |
| 295 | static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, | 290 | static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, |
| 296 | struct drm_device *dev, | 291 | struct drm_device *dev, |
| 297 | struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]) | 292 | struct drm_encoder *encoder) |
| 298 | { | 293 | { |
| 299 | return -EINVAL; | 294 | return -EINVAL; |
| 300 | } | 295 | } |
| @@ -318,6 +313,7 @@ static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; } | |||
| 318 | static inline void msm_rd_dump_submit(struct msm_gem_submit *submit) {} | 313 | static inline void msm_rd_dump_submit(struct msm_gem_submit *submit) {} |
| 319 | #endif | 314 | #endif |
| 320 | 315 | ||
| 316 | struct clk *msm_clk_get(struct platform_device *pdev, const char *name); | ||
| 321 | void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, | 317 | void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, |
| 322 | const char *dbgname); | 318 | const char *dbgname); |
| 323 | void msm_writel(u32 data, void __iomem *addr); | 319 | void msm_writel(u32 data, void __iomem *addr); |
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 489676568a10..1172fe7a9252 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c | |||
| @@ -95,13 +95,13 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, | |||
| 95 | */ | 95 | */ |
| 96 | submit->bos[i].flags = 0; | 96 | submit->bos[i].flags = 0; |
| 97 | 97 | ||
| 98 | ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo)); | 98 | if (copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo))) { |
| 99 | if (unlikely(ret)) { | ||
| 100 | pagefault_enable(); | 99 | pagefault_enable(); |
| 101 | spin_unlock(&file->table_lock); | 100 | spin_unlock(&file->table_lock); |
| 102 | ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo)); | 101 | if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) { |
| 103 | if (ret) | 102 | ret = -EFAULT; |
| 104 | goto out; | 103 | goto out; |
| 104 | } | ||
| 105 | spin_lock(&file->table_lock); | 105 | spin_lock(&file->table_lock); |
| 106 | pagefault_disable(); | 106 | pagefault_disable(); |
| 107 | } | 107 | } |
| @@ -317,9 +317,10 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob | |||
| 317 | uint64_t iova; | 317 | uint64_t iova; |
| 318 | bool valid; | 318 | bool valid; |
| 319 | 319 | ||
| 320 | ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc)); | 320 | if (copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc))) { |
| 321 | if (ret) | 321 | ret = -EFAULT; |
| 322 | goto out; | 322 | goto out; |
| 323 | } | ||
| 323 | 324 | ||
| 324 | if (submit_reloc.submit_offset % 4) { | 325 | if (submit_reloc.submit_offset % 4) { |
| 325 | DRM_ERROR("non-aligned reloc offset: %u\n", | 326 | DRM_ERROR("non-aligned reloc offset: %u\n", |
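The two msm_gem_submit.c hunks above fix the error returns: copy_from_user() reports the number of bytes left to copy, not a negative errno, so returning its result directly leaked a positive count to the caller; the fix turns any nonzero result into -EFAULT. They also preserve the slow-path dance where the atomic copy (page faults disabled, spinlock held) falls back to a sleeping copy only after dropping the lock. A stubbed, compilable outline of that pattern, with toy functions in place of the kernel primitives:

    #include <stdio.h>
    #include <string.h>

    /* stand-ins: the "atomic" copy may fail, the sleeping copy may fault */
    static int copy_inatomic(void *dst, const void *src, size_t n)
    {
        (void)dst; (void)src; (void)n;
        return -1;                  /* pretend the page was not resident */
    }

    static int copy_sleeping(void *dst, const void *src, size_t n)
    {
        memcpy(dst, src, n);
        return 0;
    }

    static void lock(void)   { puts("lock taken"); }
    static void unlock(void) { puts("lock dropped"); }

    int main(void)
    {
        int src = 42, dst = 0;

        lock();
        if (copy_inatomic(&dst, &src, sizeof(dst))) {
            /* can't fault with the lock held: drop it, do the
             * sleeping copy, then retake the lock and continue */
            unlock();
            if (copy_sleeping(&dst, &src, sizeof(dst)))
                return 1;           /* -EFAULT in the real code */
            lock();
        }
        printf("dst = %d\n", dst);
        unlock();
        return 0;
    }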
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index b28527a65d09..99e05aacbee1 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c | |||
| @@ -560,8 +560,7 @@ static irqreturn_t irq_handler(int irq, void *data) | |||
| 560 | } | 560 | } |
| 561 | 561 | ||
| 562 | static const char *clk_names[] = { | 562 | static const char *clk_names[] = { |
| 563 | "core_clk", "iface_clk", "rbbmtimer_clk", "mem_clk", | 563 | "core", "iface", "rbbmtimer", "mem", "mem_iface", "alt_mem_iface", |
| 564 | "mem_iface_clk", "alt_mem_iface_clk", | ||
| 565 | }; | 564 | }; |
| 566 | 565 | ||
| 567 | int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, | 566 | int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, |
| @@ -625,13 +624,13 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
| 625 | 624 | ||
| 626 | /* Acquire clocks: */ | 625 | /* Acquire clocks: */ |
| 627 | for (i = 0; i < ARRAY_SIZE(clk_names); i++) { | 626 | for (i = 0; i < ARRAY_SIZE(clk_names); i++) { |
| 628 | gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]); | 627 | gpu->grp_clks[i] = msm_clk_get(pdev, clk_names[i]); |
| 629 | DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]); | 628 | DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]); |
| 630 | if (IS_ERR(gpu->grp_clks[i])) | 629 | if (IS_ERR(gpu->grp_clks[i])) |
| 631 | gpu->grp_clks[i] = NULL; | 630 | gpu->grp_clks[i] = NULL; |
| 632 | } | 631 | } |
| 633 | 632 | ||
| 634 | gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk"); | 633 | gpu->ebi1_clk = msm_clk_get(pdev, "bus"); |
| 635 | DBG("ebi1_clk: %p", gpu->ebi1_clk); | 634 | DBG("ebi1_clk: %p", gpu->ebi1_clk); |
| 636 | if (IS_ERR(gpu->ebi1_clk)) | 635 | if (IS_ERR(gpu->ebi1_clk)) |
| 637 | gpu->ebi1_clk = NULL; | 636 | gpu->ebi1_clk = NULL; |
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 61aaaa1de6eb..7f5779daf5c8 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c | |||
| @@ -24,9 +24,12 @@ struct msm_iommu { | |||
| 24 | }; | 24 | }; |
| 25 | #define to_msm_iommu(x) container_of(x, struct msm_iommu, base) | 25 | #define to_msm_iommu(x) container_of(x, struct msm_iommu, base) |
| 26 | 26 | ||
| 27 | static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev, | 27 | static int msm_fault_handler(struct iommu_domain *domain, struct device *dev, |
| 28 | unsigned long iova, int flags, void *arg) | 28 | unsigned long iova, int flags, void *arg) |
| 29 | { | 29 | { |
| 30 | struct msm_iommu *iommu = arg; | ||
| 31 | if (iommu->base.handler) | ||
| 32 | return iommu->base.handler(iommu->base.arg, iova, flags); | ||
| 30 | pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags); | 33 | pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags); |
| 31 | return 0; | 34 | return 0; |
| 32 | } | 35 | } |
| @@ -136,7 +139,7 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain) | |||
| 136 | 139 | ||
| 137 | iommu->domain = domain; | 140 | iommu->domain = domain; |
| 138 | msm_mmu_init(&iommu->base, dev, &funcs); | 141 | msm_mmu_init(&iommu->base, dev, &funcs); |
| 139 | iommu_set_fault_handler(domain, msm_fault_handler, dev); | 142 | iommu_set_fault_handler(domain, msm_fault_handler, iommu); |
| 140 | 143 | ||
| 141 | return &iommu->base; | 144 | return &iommu->base; |
| 142 | } | 145 | } |
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index e470f4cf8f76..117635d2b8c5 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h | |||
| @@ -56,6 +56,9 @@ struct msm_kms_funcs { | |||
| 56 | struct drm_encoder *encoder, | 56 | struct drm_encoder *encoder, |
| 57 | struct drm_encoder *slave_encoder, | 57 | struct drm_encoder *slave_encoder, |
| 58 | bool is_cmd_mode); | 58 | bool is_cmd_mode); |
| 59 | void (*set_encoder_mode)(struct msm_kms *kms, | ||
| 60 | struct drm_encoder *encoder, | ||
| 61 | bool cmd_mode); | ||
| 59 | /* cleanup: */ | 62 | /* cleanup: */ |
| 60 | void (*destroy)(struct msm_kms *kms); | 63 | void (*destroy)(struct msm_kms *kms); |
| 61 | #ifdef CONFIG_DEBUG_FS | 64 | #ifdef CONFIG_DEBUG_FS |
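The new set_encoder_mode hook lets the DSI code flip an encoder between video and command mode at runtime through the kms function table, instead of a dedicated command-mode encoder being constructed up front. A toy vtable demonstrating the dispatch shape, with simplified types that only echo msm_kms_funcs:

    #include <stdio.h>

    struct kms;                     /* forward declaration, as in msm_kms.h */

    /* a trimmed-down function table in the style of msm_kms_funcs */
    struct kms_funcs {
        void (*set_encoder_mode)(struct kms *kms, int encoder_id,
                                 int cmd_mode);
    };

    struct kms { const struct kms_funcs *funcs; };

    static void mdp5_set_mode(struct kms *kms, int encoder_id, int cmd_mode)
    {
        (void)kms;
        printf("encoder %d -> %s mode\n", encoder_id,
               cmd_mode ? "command" : "video");
    }

    static const struct kms_funcs funcs = {
        .set_encoder_mode = mdp5_set_mode,
    };

    int main(void)
    {
        struct kms kms = { .funcs = &funcs };

        /* the DSI host can switch an encoder to command mode late,
         * once it knows what the attached panel needs */
        if (kms.funcs->set_encoder_mode)
            kms.funcs->set_encoder_mode(&kms, 0, 1);
        return 0;
    }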
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h index f85c879e68d2..aa2c5d4580c8 100644 --- a/drivers/gpu/drm/msm/msm_mmu.h +++ b/drivers/gpu/drm/msm/msm_mmu.h | |||
| @@ -33,6 +33,8 @@ struct msm_mmu_funcs { | |||
| 33 | struct msm_mmu { | 33 | struct msm_mmu { |
| 34 | const struct msm_mmu_funcs *funcs; | 34 | const struct msm_mmu_funcs *funcs; |
| 35 | struct device *dev; | 35 | struct device *dev; |
| 36 | int (*handler)(void *arg, unsigned long iova, int flags); | ||
| 37 | void *arg; | ||
| 36 | }; | 38 | }; |
| 37 | 39 | ||
| 38 | static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev, | 40 | static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev, |
| @@ -45,4 +47,11 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev, | |||
| 45 | struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain); | 47 | struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain); |
| 46 | struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu); | 48 | struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu); |
| 47 | 49 | ||
| 50 | static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg, | ||
| 51 | int (*handler)(void *arg, unsigned long iova, int flags)) | ||
| 52 | { | ||
| 53 | mmu->arg = arg; | ||
| 54 | mmu->handler = handler; | ||
| 55 | } | ||
| 56 | |||
| 48 | #endif /* __MSM_MMU_H__ */ | 57 | #endif /* __MSM_MMU_H__ */ |
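Together with the msm_iommu.c change that passes the iommu object as the fault-handler cookie, msm_mmu_set_fault_handler() gives the GPU code a per-MMU callback: the low-level IOMMU fault first tries the registered handler and only falls back to the ratelimited log. A self-contained sketch of that plumbing, with a trimmed-down struct standing in for msm_mmu:

    #include <stdio.h>

    /* trimmed-down msm_mmu with the new per-instance fault handler hook */
    struct mmu {
        int (*handler)(void *arg, unsigned long iova, int flags);
        void *arg;
    };

    static void set_fault_handler(struct mmu *mmu, void *arg,
                                  int (*handler)(void *, unsigned long, int))
    {
        mmu->arg = arg;
        mmu->handler = handler;
    }

    /* what msm_fault_handler() now does with its cookie: dispatch to
     * the registered handler if there is one, else log and swallow */
    static int fault(struct mmu *mmu, unsigned long iova, int flags)
    {
        if (mmu->handler)
            return mmu->handler(mmu->arg, iova, flags);
        fprintf(stderr, "*** fault: iova=%08lx, flags=%d\n", iova, flags);
        return 0;
    }

    static int gpu_fault(void *arg, unsigned long iova, int flags)
    {
        printf("gpu handler: iova=%08lx flags=%d (arg=%p)\n",
               iova, flags, arg);
        return 0;
    }

    int main(void)
    {
        struct mmu mmu = { 0 };

        fault(&mmu, 0x1000, 0);                 /* falls back to the log */
        set_fault_handler(&mmu, NULL, gpu_fault);
        fault(&mmu, 0x2000, 1);                 /* reaches the GPU code */
        return 0;
    }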
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c index a555681c3096..90075b676256 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/arb.c +++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c | |||
| @@ -198,7 +198,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp, | |||
| 198 | int *burst, int *lwm) | 198 | int *burst, int *lwm) |
| 199 | { | 199 | { |
| 200 | struct nouveau_drm *drm = nouveau_drm(dev); | 200 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 201 | struct nvif_object *device = &nouveau_drm(dev)->device.object; | 201 | struct nvif_object *device = &nouveau_drm(dev)->client.device.object; |
| 202 | struct nv_fifo_info fifo_data; | 202 | struct nv_fifo_info fifo_data; |
| 203 | struct nv_sim_state sim_data; | 203 | struct nv_sim_state sim_data; |
| 204 | int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY); | 204 | int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY); |
| @@ -227,7 +227,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp, | |||
| 227 | sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1); | 227 | sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1); |
| 228 | } | 228 | } |
| 229 | 229 | ||
| 230 | if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT) | 230 | if (drm->client.device.info.family == NV_DEVICE_INFO_V0_TNT) |
| 231 | nv04_calc_arb(&fifo_data, &sim_data); | 231 | nv04_calc_arb(&fifo_data, &sim_data); |
| 232 | else | 232 | else |
| 233 | nv10_calc_arb(&fifo_data, &sim_data); | 233 | nv10_calc_arb(&fifo_data, &sim_data); |
| @@ -254,7 +254,7 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm | |||
| 254 | { | 254 | { |
| 255 | struct nouveau_drm *drm = nouveau_drm(dev); | 255 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 256 | 256 | ||
| 257 | if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN) | 257 | if (drm->client.device.info.family < NV_DEVICE_INFO_V0_KELVIN) |
| 258 | nv04_update_arb(dev, vclk, bpp, burst, lwm); | 258 | nv04_update_arb(dev, vclk, bpp, burst, lwm); |
| 259 | else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ || | 259 | else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ || |
| 260 | (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) { | 260 | (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) { |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c index a72754d73c84..ab7b69c11d40 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c +++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c | |||
| @@ -113,8 +113,8 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod | |||
| 113 | { | 113 | { |
| 114 | struct drm_device *dev = crtc->dev; | 114 | struct drm_device *dev = crtc->dev; |
| 115 | struct nouveau_drm *drm = nouveau_drm(dev); | 115 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 116 | struct nvkm_bios *bios = nvxx_bios(&drm->device); | 116 | struct nvkm_bios *bios = nvxx_bios(&drm->client.device); |
| 117 | struct nvkm_clk *clk = nvxx_clk(&drm->device); | 117 | struct nvkm_clk *clk = nvxx_clk(&drm->client.device); |
| 118 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 118 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
| 119 | struct nv04_mode_state *state = &nv04_display(dev)->mode_reg; | 119 | struct nv04_mode_state *state = &nv04_display(dev)->mode_reg; |
| 120 | struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index]; | 120 | struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index]; |
| @@ -138,7 +138,7 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod | |||
| 138 | * has yet been observed in allowing the use a single stage pll on all | 138 | * has yet been observed in allowing the use a single stage pll on all |
| 139 | * nv43 however. the behaviour of single stage use is untested on nv40 | 139 | * nv43 however. the behaviour of single stage use is untested on nv40 |
| 140 | */ | 140 | */ |
| 141 | if (drm->device.info.chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2)) | 141 | if (drm->client.device.info.chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2)) |
| 142 | memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2)); | 142 | memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2)); |
| 143 | 143 | ||
| 144 | 144 | ||
| @@ -148,10 +148,10 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod | |||
| 148 | state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK; | 148 | state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK; |
| 149 | 149 | ||
| 150 | /* The blob uses this always, so let's do the same */ | 150 | /* The blob uses this always, so let's do the same */ |
| 151 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) | 151 | if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) |
| 152 | state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE; | 152 | state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE; |
| 153 | /* again nv40 and some nv43 act more like nv3x as described above */ | 153 | /* again nv40 and some nv43 act more like nv3x as described above */ |
| 154 | if (drm->device.info.chipset < 0x41) | 154 | if (drm->client.device.info.chipset < 0x41) |
| 155 | state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL | | 155 | state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL | |
| 156 | NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL; | 156 | NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL; |
| 157 | state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK; | 157 | state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK; |
| @@ -270,7 +270,7 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
| 270 | horizEnd = horizTotal - 2; | 270 | horizEnd = horizTotal - 2; |
| 271 | horizBlankEnd = horizTotal + 4; | 271 | horizBlankEnd = horizTotal + 4; |
| 272 | #if 0 | 272 | #if 0 |
| 273 | if (dev->overlayAdaptor && drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) | 273 | if (dev->overlayAdaptor && drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) |
| 274 | /* This reportedly works around some video overlay bandwidth problems */ | 274 | /* This reportedly works around some video overlay bandwidth problems */ |
| 275 | horizTotal += 2; | 275 | horizTotal += 2; |
| 276 | #endif | 276 | #endif |
| @@ -505,7 +505,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) | |||
| 505 | regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 | | 505 | regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 | |
| 506 | NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 | | 506 | NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 | |
| 507 | NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM; | 507 | NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM; |
| 508 | if (drm->device.info.chipset >= 0x11) | 508 | if (drm->client.device.info.chipset >= 0x11) |
| 509 | regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32; | 509 | regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32; |
| 510 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | 510 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
| 511 | regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE; | 511 | regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE; |
| @@ -546,26 +546,26 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) | |||
| 546 | * 1 << 30 on 0x60.830), for no apparent reason */ | 546 | * 1 << 30 on 0x60.830), for no apparent reason */ |
| 547 | regp->CRTC[NV_CIO_CRE_59] = off_chip_digital; | 547 | regp->CRTC[NV_CIO_CRE_59] = off_chip_digital; |
| 548 | 548 | ||
| 549 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) | 549 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE) |
| 550 | regp->CRTC[0x9f] = off_chip_digital ? 0x11 : 0x1; | 550 | regp->CRTC[0x9f] = off_chip_digital ? 0x11 : 0x1; |
| 551 | 551 | ||
| 552 | regp->crtc_830 = mode->crtc_vdisplay - 3; | 552 | regp->crtc_830 = mode->crtc_vdisplay - 3; |
| 553 | regp->crtc_834 = mode->crtc_vdisplay - 1; | 553 | regp->crtc_834 = mode->crtc_vdisplay - 1; |
| 554 | 554 | ||
| 555 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) | 555 | if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) |
| 556 | /* This is what the blob does */ | 556 | /* This is what the blob does */ |
| 557 | regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850); | 557 | regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850); |
| 558 | 558 | ||
| 559 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) | 559 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE) |
| 560 | regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT); | 560 | regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT); |
| 561 | 561 | ||
| 562 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) | 562 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) |
| 563 | regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC; | 563 | regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC; |
| 564 | else | 564 | else |
| 565 | regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC; | 565 | regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC; |
| 566 | 566 | ||
| 567 | /* Some misc regs */ | 567 | /* Some misc regs */ |
| 568 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) { | 568 | if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) { |
| 569 | regp->CRTC[NV_CIO_CRE_85] = 0xFF; | 569 | regp->CRTC[NV_CIO_CRE_85] = 0xFF; |
| 570 | regp->CRTC[NV_CIO_CRE_86] = 0x1; | 570 | regp->CRTC[NV_CIO_CRE_86] = 0x1; |
| 571 | } | 571 | } |
| @@ -577,7 +577,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) | |||
| 577 | 577 | ||
| 578 | /* Generic PRAMDAC regs */ | 578 | /* Generic PRAMDAC regs */ |
| 579 | 579 | ||
| 580 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) | 580 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) |
| 581 | /* Only bit that bios and blob set. */ | 581 | /* Only bit that bios and blob set. */ |
| 582 | regp->nv10_cursync = (1 << 25); | 582 | regp->nv10_cursync = (1 << 25); |
| 583 | 583 | ||
| @@ -586,7 +586,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) | |||
| 586 | NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON; | 586 | NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON; |
| 587 | if (fb->format->depth == 16) | 587 | if (fb->format->depth == 16) |
| 588 | regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL; | 588 | regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL; |
| 589 | if (drm->device.info.chipset >= 0x11) | 589 | if (drm->client.device.info.chipset >= 0x11) |
| 590 | regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG; | 590 | regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG; |
| 591 | 591 | ||
| 592 | regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */ | 592 | regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */ |
| @@ -649,7 +649,7 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
| 649 | 649 | ||
| 650 | nv_crtc_mode_set_vga(crtc, adjusted_mode); | 650 | nv_crtc_mode_set_vga(crtc, adjusted_mode); |
| 651 | /* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */ | 651 | /* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */ |
| 652 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) | 652 | if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) |
| 653 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, nv04_display(dev)->mode_reg.sel_clk); | 653 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, nv04_display(dev)->mode_reg.sel_clk); |
| 654 | nv_crtc_mode_set_regs(crtc, adjusted_mode); | 654 | nv_crtc_mode_set_regs(crtc, adjusted_mode); |
| 655 | nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock); | 655 | nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock); |
| @@ -710,7 +710,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc) | |||
| 710 | 710 | ||
| 711 | /* Some more preparation. */ | 711 | /* Some more preparation. */ |
| 712 | NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA); | 712 | NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA); |
| 713 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) { | 713 | if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) { |
| 714 | uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900); | 714 | uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900); |
| 715 | NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000); | 715 | NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000); |
| 716 | } | 716 | } |
| @@ -886,7 +886,7 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
| 886 | crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX); | 886 | crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX); |
| 887 | crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX); | 887 | crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX); |
| 888 | 888 | ||
| 889 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN) { | 889 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_KELVIN) { |
| 890 | regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8; | 890 | regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8; |
| 891 | crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47); | 891 | crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47); |
| 892 | } | 892 | } |
| @@ -967,7 +967,7 @@ static void nv11_cursor_upload(struct drm_device *dev, struct nouveau_bo *src, | |||
| 967 | { | 967 | { |
| 968 | struct nouveau_drm *drm = nouveau_drm(dev); | 968 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 969 | 969 | ||
| 970 | if (drm->device.info.chipset == 0x11) { | 970 | if (drm->client.device.info.chipset == 0x11) { |
| 971 | pixel = ((pixel & 0x000000ff) << 24) | | 971 | pixel = ((pixel & 0x000000ff) << 24) | |
| 972 | ((pixel & 0x0000ff00) << 8) | | 972 | ((pixel & 0x0000ff00) << 8) | |
| 973 | ((pixel & 0x00ff0000) >> 8) | | 973 | ((pixel & 0x00ff0000) >> 8) | |
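Note on the hunk above: the visible lines of nv11_cursor_upload() byte-swap each 32-bit cursor pixel on chipset 0x11. A minimal sketch of the swizzle, assuming the fourth term (cut off by the diff context) completes a plain 32-bit byte reversal:

	static inline uint32_t nv11_swizzle_pixel(uint32_t pixel)
	{
		/* The first three terms appear verbatim in the hunk; the
		 * last term is inferred to finish the byte reversal. */
		return ((pixel & 0x000000ff) << 24) |
		       ((pixel & 0x0000ff00) <<  8) |
		       ((pixel & 0x00ff0000) >>  8) |
		       ((pixel & 0xff000000) >> 24);
	}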
| @@ -1008,7 +1008,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | |||
| 1008 | if (ret) | 1008 | if (ret) |
| 1009 | goto out; | 1009 | goto out; |
| 1010 | 1010 | ||
| 1011 | if (drm->device.info.chipset >= 0x11) | 1011 | if (drm->client.device.info.chipset >= 0x11) |
| 1012 | nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo); | 1012 | nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo); |
| 1013 | else | 1013 | else |
| 1014 | nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo); | 1014 | nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo); |
| @@ -1124,8 +1124,9 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num) | |||
| 1124 | drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs); | 1124 | drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs); |
| 1125 | drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); | 1125 | drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); |
| 1126 | 1126 | ||
| 1127 | ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM, | 1127 | ret = nouveau_bo_new(&nouveau_drm(dev)->client, 64*64*4, 0x100, |
| 1128 | 0, 0x0000, NULL, NULL, &nv_crtc->cursor.nvbo); | 1128 | TTM_PL_FLAG_VRAM, 0, 0x0000, NULL, NULL, |
| 1129 | &nv_crtc->cursor.nvbo); | ||
| 1129 | if (!ret) { | 1130 | if (!ret) { |
| 1130 | ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, false); | 1131 | ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, false); |
| 1131 | if (!ret) { | 1132 | if (!ret) { |
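For context on the signature change above: nouveau_bo_new() now takes the nouveau client (&nouveau_drm(dev)->client) rather than the drm_device. A minimal sketch of the updated cursor-BO allocation path, using the names visible in the hunk (the wrapper name is hypothetical and error handling is abbreviated):

	static int nv04_alloc_cursor_bo(struct drm_device *dev,
					struct nouveau_crtc *nv_crtc)
	{
		int ret;

		/* Client-first nouveau_bo_new() signature used by this series:
		 * 64x64 ARGB cursor, 0x100-byte alignment, placed in VRAM. */
		ret = nouveau_bo_new(&nouveau_drm(dev)->client, 64 * 64 * 4, 0x100,
				     TTM_PL_FLAG_VRAM, 0, 0x0000, NULL, NULL,
				     &nv_crtc->cursor.nvbo);
		if (ret)
			return ret;

		/* Pin in VRAM so the cursor's scanout address stays stable. */
		return nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, false);
	}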
diff --git a/drivers/gpu/drm/nouveau/dispnv04/cursor.c b/drivers/gpu/drm/nouveau/dispnv04/cursor.c index c83116a308a4..f26e44ea7389 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/cursor.c +++ b/drivers/gpu/drm/nouveau/dispnv04/cursor.c | |||
| @@ -55,7 +55,7 @@ nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) | |||
| 55 | crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); | 55 | crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); |
| 56 | crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); | 56 | crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); |
| 57 | crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); | 57 | crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); |
| 58 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) | 58 | if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) |
| 59 | nv_fix_nv40_hw_cursor(dev, nv_crtc->index); | 59 | nv_fix_nv40_hw_cursor(dev, nv_crtc->index); |
| 60 | } | 60 | } |
| 61 | 61 | ||
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c index b6cc7766e6f7..4feab0a5419d 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/dac.c +++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c | |||
| @@ -66,7 +66,7 @@ int nv04_dac_output_offset(struct drm_encoder *encoder) | |||
| 66 | static int sample_load_twice(struct drm_device *dev, bool sense[2]) | 66 | static int sample_load_twice(struct drm_device *dev, bool sense[2]) |
| 67 | { | 67 | { |
| 68 | struct nouveau_drm *drm = nouveau_drm(dev); | 68 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 69 | struct nvif_object *device = &drm->device.object; | 69 | struct nvif_object *device = &drm->client.device.object; |
| 70 | int i; | 70 | int i; |
| 71 | 71 | ||
| 72 | for (i = 0; i < 2; i++) { | 72 | for (i = 0; i < 2; i++) { |
| @@ -80,19 +80,19 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2]) | |||
| 80 | * use a 10ms timeout (guards against crtc being inactive, in | 80 | * use a 10ms timeout (guards against crtc being inactive, in |
| 81 | * which case blank state would never change) | 81 | * which case blank state would never change) |
| 82 | */ | 82 | */ |
| 83 | if (nvif_msec(&drm->device, 10, | 83 | if (nvif_msec(&drm->client.device, 10, |
| 84 | if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1)) | 84 | if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1)) |
| 85 | break; | 85 | break; |
| 86 | ) < 0) | 86 | ) < 0) |
| 87 | return -EBUSY; | 87 | return -EBUSY; |
| 88 | 88 | ||
| 89 | if (nvif_msec(&drm->device, 10, | 89 | if (nvif_msec(&drm->client.device, 10, |
| 90 | if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1)) | 90 | if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1)) |
| 91 | break; | 91 | break; |
| 92 | ) < 0) | 92 | ) < 0) |
| 93 | return -EBUSY; | 93 | return -EBUSY; |
| 94 | 94 | ||
| 95 | if (nvif_msec(&drm->device, 10, | 95 | if (nvif_msec(&drm->client.device, 10, |
| 96 | if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1)) | 96 | if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1)) |
| 97 | break; | 97 | break; |
| 98 | ) < 0) | 98 | ) < 0) |
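The polling pattern in this hunk is worth calling out: nvif_msec() is used as a macro whose statement body is re-evaluated until it executes break or the millisecond budget expires, in which case the macro evaluates to a negative value. A sketch of one of the three waits above, isolated into a helper (the helper name is hypothetical):

	static int wait_vga_blank_clear(struct nouveau_drm *drm,
					struct nvif_object *device)
	{
		/* Re-read INP0 for up to 10ms and bail out as soon as the
		 * blanking bit drops; the timeout guards against an inactive
		 * CRTC whose blank state never changes. */
		if (nvif_msec(&drm->client.device, 10,
			if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
				break;
		) < 0)
			return -EBUSY;
		return 0;
	}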
| @@ -133,7 +133,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder, | |||
| 133 | struct drm_connector *connector) | 133 | struct drm_connector *connector) |
| 134 | { | 134 | { |
| 135 | struct drm_device *dev = encoder->dev; | 135 | struct drm_device *dev = encoder->dev; |
| 136 | struct nvif_object *device = &nouveau_drm(dev)->device.object; | 136 | struct nvif_object *device = &nouveau_drm(dev)->client.device.object; |
| 137 | struct nouveau_drm *drm = nouveau_drm(dev); | 137 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 138 | uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode; | 138 | uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode; |
| 139 | uint8_t saved_palette0[3], saved_palette_mask; | 139 | uint8_t saved_palette0[3], saved_palette_mask; |
| @@ -236,8 +236,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) | |||
| 236 | { | 236 | { |
| 237 | struct drm_device *dev = encoder->dev; | 237 | struct drm_device *dev = encoder->dev; |
| 238 | struct nouveau_drm *drm = nouveau_drm(dev); | 238 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 239 | struct nvif_object *device = &nouveau_drm(dev)->device.object; | 239 | struct nvif_object *device = &nouveau_drm(dev)->client.device.object; |
| 240 | struct nvkm_gpio *gpio = nvxx_gpio(&drm->device); | 240 | struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device); |
| 241 | struct dcb_output *dcb = nouveau_encoder(encoder)->dcb; | 241 | struct dcb_output *dcb = nouveau_encoder(encoder)->dcb; |
| 242 | uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder); | 242 | uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder); |
| 243 | uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput, | 243 | uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput, |
| @@ -288,7 +288,7 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) | |||
| 288 | /* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */ | 288 | /* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */ |
| 289 | routput = (saved_routput & 0xfffffece) | head << 8; | 289 | routput = (saved_routput & 0xfffffece) | head << 8; |
| 290 | 290 | ||
| 291 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_CURIE) { | 291 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CURIE) { |
| 292 | if (dcb->type == DCB_OUTPUT_TV) | 292 | if (dcb->type == DCB_OUTPUT_TV) |
| 293 | routput |= 0x1a << 16; | 293 | routput |= 0x1a << 16; |
| 294 | else | 294 | else |
| @@ -403,7 +403,7 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder, | |||
| 403 | } | 403 | } |
| 404 | 404 | ||
| 405 | /* This could use refinement for flatpanels, but it should work this way */ | 405 | /* This could use refinement for flatpanels, but it should work this way */ |
| 406 | if (drm->device.info.chipset < 0x44) | 406 | if (drm->client.device.info.chipset < 0x44) |
| 407 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000); | 407 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000); |
| 408 | else | 408 | else |
| 409 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); | 409 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c index 2e5bb2afda7c..9805d2cdc1a1 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c | |||
| @@ -281,7 +281,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder, | |||
| 281 | struct drm_display_mode *adjusted_mode) | 281 | struct drm_display_mode *adjusted_mode) |
| 282 | { | 282 | { |
| 283 | struct drm_device *dev = encoder->dev; | 283 | struct drm_device *dev = encoder->dev; |
| 284 | struct nvif_object *device = &nouveau_drm(dev)->device.object; | 284 | struct nvif_object *device = &nouveau_drm(dev)->client.device.object; |
| 285 | struct nouveau_drm *drm = nouveau_drm(dev); | 285 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 286 | struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); | 286 | struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); |
| 287 | struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index]; | 287 | struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index]; |
| @@ -417,7 +417,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder, | |||
| 417 | if ((nv_connector->dithering_mode == DITHERING_MODE_ON) || | 417 | if ((nv_connector->dithering_mode == DITHERING_MODE_ON) || |
| 418 | (nv_connector->dithering_mode == DITHERING_MODE_AUTO && | 418 | (nv_connector->dithering_mode == DITHERING_MODE_AUTO && |
| 419 | fb->format->depth > connector->display_info.bpc * 3)) { | 419 | fb->format->depth > connector->display_info.bpc * 3)) { |
| 420 | if (drm->device.info.chipset == 0x11) | 420 | if (drm->client.device.info.chipset == 0x11) |
| 421 | regp->dither = savep->dither | 0x00010000; | 421 | regp->dither = savep->dither | 0x00010000; |
| 422 | else { | 422 | else { |
| 423 | int i; | 423 | int i; |
| @@ -428,7 +428,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder, | |||
| 428 | } | 428 | } |
| 429 | } | 429 | } |
| 430 | } else { | 430 | } else { |
| 431 | if (drm->device.info.chipset != 0x11) { | 431 | if (drm->client.device.info.chipset != 0x11) { |
| 432 | /* reset them */ | 432 | /* reset them */ |
| 433 | int i; | 433 | int i; |
| 434 | for (i = 0; i < 3; i++) { | 434 | for (i = 0; i < 3; i++) { |
| @@ -464,7 +464,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder) | |||
| 464 | NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); | 464 | NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); |
| 465 | 465 | ||
| 466 | /* This could use refinement for flatpanels, but it should work this way */ | 466 | /* This could use refinement for flatpanels, but it should work this way */ |
| 467 | if (drm->device.info.chipset < 0x44) | 467 | if (drm->client.device.info.chipset < 0x44) |
| 468 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000); | 468 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000); |
| 469 | else | 469 | else |
| 470 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); | 470 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); |
| @@ -486,7 +486,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode) | |||
| 486 | { | 486 | { |
| 487 | #ifdef __powerpc__ | 487 | #ifdef __powerpc__ |
| 488 | struct drm_device *dev = encoder->dev; | 488 | struct drm_device *dev = encoder->dev; |
| 489 | struct nvif_object *device = &nouveau_drm(dev)->device.object; | 489 | struct nvif_object *device = &nouveau_drm(dev)->client.device.object; |
| 490 | 490 | ||
| 491 | /* BIOS scripts usually take care of the backlight, thanks | 491 | /* BIOS scripts usually take care of the backlight, thanks |
| 492 | * Apple for your consistency. | 492 | * Apple for your consistency. |
| @@ -624,7 +624,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder) | |||
| 624 | struct drm_device *dev = encoder->dev; | 624 | struct drm_device *dev = encoder->dev; |
| 625 | struct dcb_output *dcb = nouveau_encoder(encoder)->dcb; | 625 | struct dcb_output *dcb = nouveau_encoder(encoder)->dcb; |
| 626 | struct nouveau_drm *drm = nouveau_drm(dev); | 626 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 627 | struct nvkm_i2c *i2c = nvxx_i2c(&drm->device); | 627 | struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device); |
| 628 | struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI); | 628 | struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI); |
| 629 | struct nvkm_i2c_bus_probe info[] = { | 629 | struct nvkm_i2c_bus_probe info[] = { |
| 630 | { | 630 | { |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c index 34c0f2f67548..5b9d549aa791 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c | |||
| @@ -35,7 +35,7 @@ int | |||
| 35 | nv04_display_create(struct drm_device *dev) | 35 | nv04_display_create(struct drm_device *dev) |
| 36 | { | 36 | { |
| 37 | struct nouveau_drm *drm = nouveau_drm(dev); | 37 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 38 | struct nvkm_i2c *i2c = nvxx_i2c(&drm->device); | 38 | struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device); |
| 39 | struct dcb_table *dcb = &drm->vbios.dcb; | 39 | struct dcb_table *dcb = &drm->vbios.dcb; |
| 40 | struct drm_connector *connector, *ct; | 40 | struct drm_connector *connector, *ct; |
| 41 | struct drm_encoder *encoder; | 41 | struct drm_encoder *encoder; |
| @@ -48,7 +48,7 @@ nv04_display_create(struct drm_device *dev) | |||
| 48 | if (!disp) | 48 | if (!disp) |
| 49 | return -ENOMEM; | 49 | return -ENOMEM; |
| 50 | 50 | ||
| 51 | nvif_object_map(&drm->device.object); | 51 | nvif_object_map(&drm->client.device.object); |
| 52 | 52 | ||
| 53 | nouveau_display(dev)->priv = disp; | 53 | nouveau_display(dev)->priv = disp; |
| 54 | nouveau_display(dev)->dtor = nv04_display_destroy; | 54 | nouveau_display(dev)->dtor = nv04_display_destroy; |
| @@ -139,7 +139,7 @@ nv04_display_destroy(struct drm_device *dev) | |||
| 139 | nouveau_display(dev)->priv = NULL; | 139 | nouveau_display(dev)->priv = NULL; |
| 140 | kfree(disp); | 140 | kfree(disp); |
| 141 | 141 | ||
| 142 | nvif_object_unmap(&drm->device.object); | 142 | nvif_object_unmap(&drm->client.device.object); |
| 143 | } | 143 | } |
| 144 | 144 | ||
| 145 | int | 145 | int |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h index 7030307d2d48..bea4543554ba 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.h +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h | |||
| @@ -129,7 +129,7 @@ nv_two_heads(struct drm_device *dev) | |||
| 129 | struct nouveau_drm *drm = nouveau_drm(dev); | 129 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 130 | const int impl = dev->pdev->device & 0x0ff0; | 130 | const int impl = dev->pdev->device & 0x0ff0; |
| 131 | 131 | ||
| 132 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS && impl != 0x0100 && | 132 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS && impl != 0x0100 && |
| 133 | impl != 0x0150 && impl != 0x01a0 && impl != 0x0200) | 133 | impl != 0x0150 && impl != 0x01a0 && impl != 0x0200) |
| 134 | return true; | 134 | return true; |
| 135 | 135 | ||
| @@ -148,7 +148,7 @@ nv_two_reg_pll(struct drm_device *dev) | |||
| 148 | struct nouveau_drm *drm = nouveau_drm(dev); | 148 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 149 | const int impl = dev->pdev->device & 0x0ff0; | 149 | const int impl = dev->pdev->device & 0x0ff0; |
| 150 | 150 | ||
| 151 | if (impl == 0x0310 || impl == 0x0340 || drm->device.info.family >= NV_DEVICE_INFO_V0_CURIE) | 151 | if (impl == 0x0310 || impl == 0x0340 || drm->client.device.info.family >= NV_DEVICE_INFO_V0_CURIE) |
| 152 | return true; | 152 | return true; |
| 153 | return false; | 153 | return false; |
| 154 | } | 154 | } |
| @@ -170,7 +170,7 @@ nouveau_bios_run_init_table(struct drm_device *dev, u16 table, | |||
| 170 | struct dcb_output *outp, int crtc) | 170 | struct dcb_output *outp, int crtc) |
| 171 | { | 171 | { |
| 172 | struct nouveau_drm *drm = nouveau_drm(dev); | 172 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 173 | struct nvkm_bios *bios = nvxx_bios(&drm->device); | 173 | struct nvkm_bios *bios = nvxx_bios(&drm->client.device); |
| 174 | struct nvbios_init init = { | 174 | struct nvbios_init init = { |
| 175 | .subdev = &bios->subdev, | 175 | .subdev = &bios->subdev, |
| 176 | .bios = bios, | 176 | .bios = bios, |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c index 74856a8b8f35..b98599002831 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/hw.c +++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c | |||
| @@ -89,7 +89,7 @@ NVSetOwner(struct drm_device *dev, int owner) | |||
| 89 | if (owner == 1) | 89 | if (owner == 1) |
| 90 | owner *= 3; | 90 | owner *= 3; |
| 91 | 91 | ||
| 92 | if (drm->device.info.chipset == 0x11) { | 92 | if (drm->client.device.info.chipset == 0x11) { |
| 93 | /* This might seem stupid, but the blob does it and | 93 | /* This might seem stupid, but the blob does it and |
| 94 | * omitting it often locks the system up. | 94 | * omitting it often locks the system up. |
| 95 | */ | 95 | */ |
| @@ -100,7 +100,7 @@ NVSetOwner(struct drm_device *dev, int owner) | |||
| 100 | /* CR44 is always changed on CRTC0 */ | 100 | /* CR44 is always changed on CRTC0 */ |
| 101 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner); | 101 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner); |
| 102 | 102 | ||
| 103 | if (drm->device.info.chipset == 0x11) { /* set me harder */ | 103 | if (drm->client.device.info.chipset == 0x11) { /* set me harder */ |
| 104 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); | 104 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); |
| 105 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); | 105 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); |
| 106 | } | 106 | } |
| @@ -149,7 +149,7 @@ nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1, | |||
| 149 | pllvals->NM1 = pll1 & 0xffff; | 149 | pllvals->NM1 = pll1 & 0xffff; |
| 150 | if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2) | 150 | if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2) |
| 151 | pllvals->NM2 = pll2 & 0xffff; | 151 | pllvals->NM2 = pll2 & 0xffff; |
| 152 | else if (drm->device.info.chipset == 0x30 || drm->device.info.chipset == 0x35) { | 152 | else if (drm->client.device.info.chipset == 0x30 || drm->client.device.info.chipset == 0x35) { |
| 153 | pllvals->M1 &= 0xf; /* only 4 bits */ | 153 | pllvals->M1 &= 0xf; /* only 4 bits */ |
| 154 | if (pll1 & NV30_RAMDAC_ENABLE_VCO2) { | 154 | if (pll1 & NV30_RAMDAC_ENABLE_VCO2) { |
| 155 | pllvals->M2 = (pll1 >> 4) & 0x7; | 155 | pllvals->M2 = (pll1 >> 4) & 0x7; |
| @@ -165,8 +165,8 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype, | |||
| 165 | struct nvkm_pll_vals *pllvals) | 165 | struct nvkm_pll_vals *pllvals) |
| 166 | { | 166 | { |
| 167 | struct nouveau_drm *drm = nouveau_drm(dev); | 167 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 168 | struct nvif_object *device = &drm->device.object; | 168 | struct nvif_object *device = &drm->client.device.object; |
| 169 | struct nvkm_bios *bios = nvxx_bios(&drm->device); | 169 | struct nvkm_bios *bios = nvxx_bios(&drm->client.device); |
| 170 | uint32_t reg1, pll1, pll2 = 0; | 170 | uint32_t reg1, pll1, pll2 = 0; |
| 171 | struct nvbios_pll pll_lim; | 171 | struct nvbios_pll pll_lim; |
| 172 | int ret; | 172 | int ret; |
| @@ -184,7 +184,7 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype, | |||
| 184 | pll2 = nvif_rd32(device, reg2); | 184 | pll2 = nvif_rd32(device, reg2); |
| 185 | } | 185 | } |
| 186 | 186 | ||
| 187 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS && reg1 >= NV_PRAMDAC_VPLL_COEFF) { | 187 | if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS && reg1 >= NV_PRAMDAC_VPLL_COEFF) { |
| 188 | uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580); | 188 | uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580); |
| 189 | 189 | ||
| 190 | /* check whether vpll has been forced into single stage mode */ | 190 | /* check whether vpll has been forced into single stage mode */ |
| @@ -222,6 +222,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype) | |||
| 222 | uint32_t mpllP; | 222 | uint32_t mpllP; |
| 223 | 223 | ||
| 224 | pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP); | 224 | pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP); |
| 225 | mpllP = (mpllP >> 8) & 0xf; | ||
| 225 | if (!mpllP) | 226 | if (!mpllP) |
| 226 | mpllP = 4; | 227 | mpllP = 4; |
| 227 | 228 | ||
| @@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype) | |||
| 232 | uint32_t clock; | 233 | uint32_t clock; |
| 233 | 234 | ||
| 234 | pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock); | 235 | pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock); |
| 235 | return clock; | 236 | return clock / 1000; |
| 236 | } | 237 | } |
| 237 | 238 | ||
| 238 | ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals); | 239 | ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals); |
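Two behavioural fixes land in this hunk of nouveau_hw_get_clock(): the raw config dword read from the nForce bridge is now masked down to the MPLL post-divider in bits 11:8 (with 0 still meaning divide-by-4), and the nForce2 clock register is scaled by 1000 before being returned. A sketch of the fixed reads; the unit rationale is an assumption inferred from the /1000, not stated in the diff:

	uint32_t mpllP, clock;

	/* nForce: the P divider lives in bits 11:8 of config reg 0x6c. */
	pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
	mpllP = (mpllP >> 8) & 0xf;
	if (!mpllP)
		mpllP = 4;

	/* nForce2: reg 0x4c reads a finer-grained value (assumed Hz);
	 * divide by 1000 to match the units callers expect. */
	pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
	clock /= 1000;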
| @@ -252,7 +253,7 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head) | |||
| 252 | */ | 253 | */ |
| 253 | 254 | ||
| 254 | struct nouveau_drm *drm = nouveau_drm(dev); | 255 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 255 | struct nvif_device *device = &drm->device; | 256 | struct nvif_device *device = &drm->client.device; |
| 256 | struct nvkm_clk *clk = nvxx_clk(device); | 257 | struct nvkm_clk *clk = nvxx_clk(device); |
| 257 | struct nvkm_bios *bios = nvxx_bios(device); | 258 | struct nvkm_bios *bios = nvxx_bios(device); |
| 258 | struct nvbios_pll pll_lim; | 259 | struct nvbios_pll pll_lim; |
| @@ -391,21 +392,21 @@ nv_save_state_ramdac(struct drm_device *dev, int head, | |||
| 391 | struct nv04_crtc_reg *regp = &state->crtc_reg[head]; | 392 | struct nv04_crtc_reg *regp = &state->crtc_reg[head]; |
| 392 | int i; | 393 | int i; |
| 393 | 394 | ||
| 394 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) | 395 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) |
| 395 | regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC); | 396 | regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC); |
| 396 | 397 | ||
| 397 | nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, &regp->pllvals); | 398 | nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, &regp->pllvals); |
| 398 | state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT); | 399 | state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT); |
| 399 | if (nv_two_heads(dev)) | 400 | if (nv_two_heads(dev)) |
| 400 | state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK); | 401 | state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK); |
| 401 | if (drm->device.info.chipset == 0x11) | 402 | if (drm->client.device.info.chipset == 0x11) |
| 402 | regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11); | 403 | regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11); |
| 403 | 404 | ||
| 404 | regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL); | 405 | regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL); |
| 405 | 406 | ||
| 406 | if (nv_gf4_disp_arch(dev)) | 407 | if (nv_gf4_disp_arch(dev)) |
| 407 | regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630); | 408 | regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630); |
| 408 | if (drm->device.info.chipset >= 0x30) | 409 | if (drm->client.device.info.chipset >= 0x30) |
| 409 | regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634); | 410 | regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634); |
| 410 | 411 | ||
| 411 | regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP); | 412 | regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP); |
| @@ -447,7 +448,7 @@ nv_save_state_ramdac(struct drm_device *dev, int head, | |||
| 447 | if (nv_gf4_disp_arch(dev)) | 448 | if (nv_gf4_disp_arch(dev)) |
| 448 | regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0); | 449 | regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0); |
| 449 | 450 | ||
| 450 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) { | 451 | if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) { |
| 451 | regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20); | 452 | regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20); |
| 452 | regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24); | 453 | regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24); |
| 453 | regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34); | 454 | regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34); |
| @@ -463,26 +464,26 @@ nv_load_state_ramdac(struct drm_device *dev, int head, | |||
| 463 | struct nv04_mode_state *state) | 464 | struct nv04_mode_state *state) |
| 464 | { | 465 | { |
| 465 | struct nouveau_drm *drm = nouveau_drm(dev); | 466 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 466 | struct nvkm_clk *clk = nvxx_clk(&drm->device); | 467 | struct nvkm_clk *clk = nvxx_clk(&drm->client.device); |
| 467 | struct nv04_crtc_reg *regp = &state->crtc_reg[head]; | 468 | struct nv04_crtc_reg *regp = &state->crtc_reg[head]; |
| 468 | uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF; | 469 | uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF; |
| 469 | int i; | 470 | int i; |
| 470 | 471 | ||
| 471 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) | 472 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) |
| 472 | NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync); | 473 | NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync); |
| 473 | 474 | ||
| 474 | clk->pll_prog(clk, pllreg, &regp->pllvals); | 475 | clk->pll_prog(clk, pllreg, &regp->pllvals); |
| 475 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel); | 476 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel); |
| 476 | if (nv_two_heads(dev)) | 477 | if (nv_two_heads(dev)) |
| 477 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk); | 478 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk); |
| 478 | if (drm->device.info.chipset == 0x11) | 479 | if (drm->client.device.info.chipset == 0x11) |
| 479 | NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither); | 480 | NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither); |
| 480 | 481 | ||
| 481 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl); | 482 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl); |
| 482 | 483 | ||
| 483 | if (nv_gf4_disp_arch(dev)) | 484 | if (nv_gf4_disp_arch(dev)) |
| 484 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630); | 485 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630); |
| 485 | if (drm->device.info.chipset >= 0x30) | 486 | if (drm->client.device.info.chipset >= 0x30) |
| 486 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634); | 487 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634); |
| 487 | 488 | ||
| 488 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup); | 489 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup); |
| @@ -519,7 +520,7 @@ nv_load_state_ramdac(struct drm_device *dev, int head, | |||
| 519 | if (nv_gf4_disp_arch(dev)) | 520 | if (nv_gf4_disp_arch(dev)) |
| 520 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0); | 521 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0); |
| 521 | 522 | ||
| 522 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) { | 523 | if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) { |
| 523 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20); | 524 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20); |
| 524 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24); | 525 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24); |
| 525 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34); | 526 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34); |
| @@ -600,10 +601,10 @@ nv_save_state_ext(struct drm_device *dev, int head, | |||
| 600 | rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); | 601 | rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); |
| 601 | rd_cio_state(dev, head, regp, NV_CIO_CRE_21); | 602 | rd_cio_state(dev, head, regp, NV_CIO_CRE_21); |
| 602 | 603 | ||
| 603 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN) | 604 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_KELVIN) |
| 604 | rd_cio_state(dev, head, regp, NV_CIO_CRE_47); | 605 | rd_cio_state(dev, head, regp, NV_CIO_CRE_47); |
| 605 | 606 | ||
| 606 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) | 607 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE) |
| 607 | rd_cio_state(dev, head, regp, 0x9f); | 608 | rd_cio_state(dev, head, regp, 0x9f); |
| 608 | 609 | ||
| 609 | rd_cio_state(dev, head, regp, NV_CIO_CRE_49); | 610 | rd_cio_state(dev, head, regp, NV_CIO_CRE_49); |
| @@ -612,14 +613,14 @@ nv_save_state_ext(struct drm_device *dev, int head, | |||
| 612 | rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); | 613 | rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); |
| 613 | rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); | 614 | rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); |
| 614 | 615 | ||
| 615 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { | 616 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { |
| 616 | regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830); | 617 | regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830); |
| 617 | regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834); | 618 | regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834); |
| 618 | 619 | ||
| 619 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) | 620 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE) |
| 620 | regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT); | 621 | regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT); |
| 621 | 622 | ||
| 622 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) | 623 | if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) |
| 623 | regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850); | 624 | regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850); |
| 624 | 625 | ||
| 625 | if (nv_two_heads(dev)) | 626 | if (nv_two_heads(dev)) |
| @@ -631,7 +632,7 @@ nv_save_state_ext(struct drm_device *dev, int head, | |||
| 631 | 632 | ||
| 632 | rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); | 633 | rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); |
| 633 | rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); | 634 | rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); |
| 634 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { | 635 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { |
| 635 | rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); | 636 | rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); |
| 636 | rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB); | 637 | rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB); |
| 637 | rd_cio_state(dev, head, regp, NV_CIO_CRE_4B); | 638 | rd_cio_state(dev, head, regp, NV_CIO_CRE_4B); |
| @@ -660,12 +661,12 @@ nv_load_state_ext(struct drm_device *dev, int head, | |||
| 660 | struct nv04_mode_state *state) | 661 | struct nv04_mode_state *state) |
| 661 | { | 662 | { |
| 662 | struct nouveau_drm *drm = nouveau_drm(dev); | 663 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 663 | struct nvif_object *device = &drm->device.object; | 664 | struct nvif_object *device = &drm->client.device.object; |
| 664 | struct nv04_crtc_reg *regp = &state->crtc_reg[head]; | 665 | struct nv04_crtc_reg *regp = &state->crtc_reg[head]; |
| 665 | uint32_t reg900; | 666 | uint32_t reg900; |
| 666 | int i; | 667 | int i; |
| 667 | 668 | ||
| 668 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { | 669 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { |
| 669 | if (nv_two_heads(dev)) | 670 | if (nv_two_heads(dev)) |
| 670 | /* setting ENGINE_CTRL (EC) *must* come before | 671 | /* setting ENGINE_CTRL (EC) *must* come before |
| 671 | * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in | 672 | * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in |
| @@ -677,20 +678,20 @@ nv_load_state_ext(struct drm_device *dev, int head, | |||
| 677 | nvif_wr32(device, NV_PVIDEO_INTR_EN, 0); | 678 | nvif_wr32(device, NV_PVIDEO_INTR_EN, 0); |
| 678 | nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0); | 679 | nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0); |
| 679 | nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0); | 680 | nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0); |
| 680 | nvif_wr32(device, NV_PVIDEO_LIMIT(0), drm->device.info.ram_size - 1); | 681 | nvif_wr32(device, NV_PVIDEO_LIMIT(0), drm->client.device.info.ram_size - 1); |
| 681 | nvif_wr32(device, NV_PVIDEO_LIMIT(1), drm->device.info.ram_size - 1); | 682 | nvif_wr32(device, NV_PVIDEO_LIMIT(1), drm->client.device.info.ram_size - 1); |
| 682 | nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), drm->device.info.ram_size - 1); | 683 | nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), drm->client.device.info.ram_size - 1); |
| 683 | nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), drm->device.info.ram_size - 1); | 684 | nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), drm->client.device.info.ram_size - 1); |
| 684 | nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0); | 685 | nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0); |
| 685 | 686 | ||
| 686 | NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg); | 687 | NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg); |
| 687 | NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830); | 688 | NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830); |
| 688 | NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834); | 689 | NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834); |
| 689 | 690 | ||
| 690 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) | 691 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE) |
| 691 | NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext); | 692 | NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext); |
| 692 | 693 | ||
| 693 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) { | 694 | if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) { |
| 694 | NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850); | 695 | NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850); |
| 695 | 696 | ||
| 696 | reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900); | 697 | reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900); |
| @@ -713,23 +714,23 @@ nv_load_state_ext(struct drm_device *dev, int head, | |||
| 713 | wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX); | 714 | wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX); |
| 714 | wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); | 715 | wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); |
| 715 | 716 | ||
| 716 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN) | 717 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_KELVIN) |
| 717 | wr_cio_state(dev, head, regp, NV_CIO_CRE_47); | 718 | wr_cio_state(dev, head, regp, NV_CIO_CRE_47); |
| 718 | 719 | ||
| 719 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) | 720 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE) |
| 720 | wr_cio_state(dev, head, regp, 0x9f); | 721 | wr_cio_state(dev, head, regp, 0x9f); |
| 721 | 722 | ||
| 722 | wr_cio_state(dev, head, regp, NV_CIO_CRE_49); | 723 | wr_cio_state(dev, head, regp, NV_CIO_CRE_49); |
| 723 | wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); | 724 | wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); |
| 724 | wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); | 725 | wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); |
| 725 | wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); | 726 | wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); |
| 726 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) | 727 | if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) |
| 727 | nv_fix_nv40_hw_cursor(dev, head); | 728 | nv_fix_nv40_hw_cursor(dev, head); |
| 728 | wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); | 729 | wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); |
| 729 | 730 | ||
| 730 | wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); | 731 | wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); |
| 731 | wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); | 732 | wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); |
| 732 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { | 733 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { |
| 733 | wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); | 734 | wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); |
| 734 | wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB); | 735 | wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB); |
| 735 | wr_cio_state(dev, head, regp, NV_CIO_CRE_4B); | 736 | wr_cio_state(dev, head, regp, NV_CIO_CRE_4B); |
| @@ -737,14 +738,14 @@ nv_load_state_ext(struct drm_device *dev, int head, | |||
| 737 | } | 738 | } |
| 738 | /* NV11 and NV20 stop at 0x52. */ | 739 | /* NV11 and NV20 stop at 0x52. */ |
| 739 | if (nv_gf4_disp_arch(dev)) { | 740 | if (nv_gf4_disp_arch(dev)) { |
| 740 | if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN) { | 741 | if (drm->client.device.info.family < NV_DEVICE_INFO_V0_KELVIN) { |
| 741 | /* Not waiting for vertical retrace before modifying | 742 | /* Not waiting for vertical retrace before modifying |
| 742 | CRE_53/CRE_54 causes lockups. */ | 743 | CRE_53/CRE_54 causes lockups. */ |
| 743 | nvif_msec(&drm->device, 650, | 744 | nvif_msec(&drm->client.device, 650, |
| 744 | if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8)) | 745 | if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8)) |
| 745 | break; | 746 | break; |
| 746 | ); | 747 | ); |
| 747 | nvif_msec(&drm->device, 650, | 748 | nvif_msec(&drm->client.device, 650, |
| 748 | if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8)) | 749 | if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8)) |
| 749 | break; | 750 | break; |
| 750 | ); | 751 | ); |
| @@ -770,7 +771,7 @@ static void | |||
| 770 | nv_save_state_palette(struct drm_device *dev, int head, | 771 | nv_save_state_palette(struct drm_device *dev, int head, |
| 771 | struct nv04_mode_state *state) | 772 | struct nv04_mode_state *state) |
| 772 | { | 773 | { |
| 773 | struct nvif_object *device = &nouveau_drm(dev)->device.object; | 774 | struct nvif_object *device = &nouveau_drm(dev)->client.device.object; |
| 774 | int head_offset = head * NV_PRMDIO_SIZE, i; | 775 | int head_offset = head * NV_PRMDIO_SIZE, i; |
| 775 | 776 | ||
| 776 | nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset, | 777 | nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset, |
| @@ -789,7 +790,7 @@ void | |||
| 789 | nouveau_hw_load_state_palette(struct drm_device *dev, int head, | 790 | nouveau_hw_load_state_palette(struct drm_device *dev, int head, |
| 790 | struct nv04_mode_state *state) | 791 | struct nv04_mode_state *state) |
| 791 | { | 792 | { |
| 792 | struct nvif_object *device = &nouveau_drm(dev)->device.object; | 793 | struct nvif_object *device = &nouveau_drm(dev)->client.device.object; |
| 793 | int head_offset = head * NV_PRMDIO_SIZE, i; | 794 | int head_offset = head * NV_PRMDIO_SIZE, i; |
| 794 | 795 | ||
| 795 | nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset, | 796 | nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset, |
| @@ -809,7 +810,7 @@ void nouveau_hw_save_state(struct drm_device *dev, int head, | |||
| 809 | { | 810 | { |
| 810 | struct nouveau_drm *drm = nouveau_drm(dev); | 811 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 811 | 812 | ||
| 812 | if (drm->device.info.chipset == 0x11) | 813 | if (drm->client.device.info.chipset == 0x11) |
| 813 | /* NB: no attempt is made to restore the bad pll later on */ | 814 | /* NB: no attempt is made to restore the bad pll later on */ |
| 814 | nouveau_hw_fix_bad_vpll(dev, head); | 815 | nouveau_hw_fix_bad_vpll(dev, head); |
| 815 | nv_save_state_ramdac(dev, head, state); | 816 | nv_save_state_ramdac(dev, head, state); |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.h b/drivers/gpu/drm/nouveau/dispnv04/hw.h index 3bded60c5596..3a2be47fb4f1 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/hw.h +++ b/drivers/gpu/drm/nouveau/dispnv04/hw.h | |||
| @@ -60,7 +60,7 @@ extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp, | |||
| 60 | static inline uint32_t NVReadCRTC(struct drm_device *dev, | 60 | static inline uint32_t NVReadCRTC(struct drm_device *dev, |
| 61 | int head, uint32_t reg) | 61 | int head, uint32_t reg) |
| 62 | { | 62 | { |
| 63 | struct nvif_object *device = &nouveau_drm(dev)->device.object; | 63 | struct nvif_object *device = &nouveau_drm(dev)->client.device.object; |
| 64 | uint32_t val; | 64 | uint32_t val; |
| 65 | if (head) | 65 | if (head) |
| 66 | reg += NV_PCRTC0_SIZE; | 66 | reg += NV_PCRTC0_SIZE; |
| @@ -71,7 +71,7 @@ static inline uint32_t NVReadCRTC(struct drm_device *dev, | |||
| 71 | static inline void NVWriteCRTC(struct drm_device *dev, | 71 | static inline void NVWriteCRTC(struct drm_device *dev, |
| 72 | int head, uint32_t reg, uint32_t val) | 72 | int head, uint32_t reg, uint32_t val) |
| 73 | { | 73 | { |
| 74 | struct nvif_object *device = &nouveau_drm(dev)->device.object; | 74 | struct nvif_object *device = &nouveau_drm(dev)->client.device.object; |
| 75 | if (head) | 75 | if (head) |
| 76 | reg += NV_PCRTC0_SIZE; | 76 | reg += NV_PCRTC0_SIZE; |
| 77 | nvif_wr32(device, reg, val); | 77 | nvif_wr32(device, reg, val); |
| @@ -80,7 +80,7 @@ static inline void NVWriteCRTC(struct drm_device *dev, | |||
| 80 | static inline uint32_t NVReadRAMDAC(struct drm_device *dev, | 80 | static inline uint32_t NVReadRAMDAC(struct drm_device *dev, |
| 81 | int head, uint32_t reg) | 81 | int head, uint32_t reg) |
| 82 | { | 82 | { |
| 83 | struct nvif_object *device = &nouveau_drm(dev)->device.object; | 83 | struct nvif_object *device = &nouveau_drm(dev)->client.device.object; |
| 84 | uint32_t val; | 84 | uint32_t val; |
| 85 | if (head) | 85 | if (head) |
| 86 | reg += NV_PRAMDAC0_SIZE; | 86 | reg += NV_PRAMDAC0_SIZE; |
| @@ -91,7 +91,7 @@ static inline uint32_t NVReadRAMDAC(struct drm_device *dev, | |||
| 91 | static inline void NVWriteRAMDAC(struct drm_device *dev, | 91 | static inline void NVWriteRAMDAC(struct drm_device *dev, |
| 92 | int head, uint32_t reg, uint32_t val) | 92 | int head, uint32_t reg, uint32_t val) |
| 93 | { | 93 | { |
| 94 | struct nvif_object *device = &nouveau_drm(dev)->device.object; | 94 | struct nvif_object *device = &nouveau_drm(dev)->client.device.object; |
| 95 | if (head) | 95 | if (head) |
| 96 | reg += NV_PRAMDAC0_SIZE; | 96 | reg += NV_PRAMDAC0_SIZE; |
| 97 | nvif_wr32(device, reg, val); | 97 | nvif_wr32(device, reg, val); |
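These hw.h helpers encode the per-head register layout: head 1's CRTC and RAMDAC windows sit NV_PCRTC0_SIZE and NV_PRAMDAC0_SIZE above head 0's, so every accessor conditionally offsets the register before the nvif_rd32/nvif_wr32. A sketch of the same pattern for a hypothetical accessor, to show the convention rather than a real API:

	static inline uint32_t NVReadHeadReg(struct drm_device *dev,
					     int head, uint32_t reg)
	{
		struct nvif_object *device = &nouveau_drm(dev)->client.device.object;

		if (head)
			reg += NV_PCRTC0_SIZE;	/* head 1 window follows head 0 */
		return nvif_rd32(device, reg);
	}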
| @@ -120,7 +120,7 @@ static inline void nv_write_tmds(struct drm_device *dev, | |||
| 120 | static inline void NVWriteVgaCrtc(struct drm_device *dev, | 120 | static inline void NVWriteVgaCrtc(struct drm_device *dev, |
| 121 | int head, uint8_t index, uint8_t value) | 121 | int head, uint8_t index, uint8_t value) |
| 122 | { | 122 | { |
| 123 | struct nvif_object *device = &nouveau_drm(dev)->device.object; | 123 | struct nvif_object *device = &nouveau_drm(dev)->client.device.object; |
| 124 | nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); | 124 | nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); |
| 125 | nvif_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value); | 125 | nvif_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value); |
| 126 | } | 126 | } |
| @@ -128,7 +128,7 @@ static inline void NVWriteVgaCrtc(struct drm_device *dev, | |||
| 128 | static inline uint8_t NVReadVgaCrtc(struct drm_device *dev, | 128 | static inline uint8_t NVReadVgaCrtc(struct drm_device *dev, |
| 129 | int head, uint8_t index) | 129 | int head, uint8_t index) |
| 130 | { | 130 | { |
| 131 | struct nvif_object *device = &nouveau_drm(dev)->device.object; | 131 | struct nvif_object *device = &nouveau_drm(dev)->client.device.object; |
| 132 | uint8_t val; | 132 | uint8_t val; |
| 133 | nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); | 133 | nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); |
| 134 | val = nvif_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE); | 134 | val = nvif_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE); |
| @@ -165,13 +165,13 @@ static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_ | |||
| 165 | static inline uint8_t NVReadPRMVIO(struct drm_device *dev, | 165 | static inline uint8_t NVReadPRMVIO(struct drm_device *dev, |
| 166 | int head, uint32_t reg) | 166 | int head, uint32_t reg) |
| 167 | { | 167 | { |
| 168 | struct nvif_object *device = &nouveau_drm(dev)->device.object; | 168 | struct nvif_object *device = &nouveau_drm(dev)->client.device.object; |
| 169 | struct nouveau_drm *drm = nouveau_drm(dev); | 169 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 170 | uint8_t val; | 170 | uint8_t val; |
| 171 | 171 | ||
| 172 | /* Only NV4x have two pvio ranges; other twoHeads cards MUST call | 172 | /* Only NV4x have two pvio ranges; other twoHeads cards MUST call |
| 173 | * NVSetOwner for the relevant head to be programmed */ | 173 | * NVSetOwner for the relevant head to be programmed */ |
| 174 | if (head && drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) | 174 | if (head && drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) |
| 175 | reg += NV_PRMVIO_SIZE; | 175 | reg += NV_PRMVIO_SIZE; |
| 176 | 176 | ||
| 177 | val = nvif_rd08(device, reg); | 177 | val = nvif_rd08(device, reg); |
| @@ -181,12 +181,12 @@ static inline uint8_t NVReadPRMVIO(struct drm_device *dev, | |||
| 181 | static inline void NVWritePRMVIO(struct drm_device *dev, | 181 | static inline void NVWritePRMVIO(struct drm_device *dev, |
| 182 | int head, uint32_t reg, uint8_t value) | 182 | int head, uint32_t reg, uint8_t value) |
| 183 | { | 183 | { |
| 184 | struct nvif_object *device = &nouveau_drm(dev)->device.object; | 184 | struct nvif_object *device = &nouveau_drm(dev)->client.device.object; |
| 185 | struct nouveau_drm *drm = nouveau_drm(dev); | 185 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 186 | 186 | ||
| 187 | /* Only NV4x have two pvio ranges; other twoHeads cards MUST call | 187 | /* Only NV4x have two pvio ranges; other twoHeads cards MUST call |
| 188 | * NVSetOwner for the relevant head to be programmed */ | 188 | * NVSetOwner for the relevant head to be programmed */ |
| 189 | if (head && drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) | 189 | if (head && drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) |
| 190 | reg += NV_PRMVIO_SIZE; | 190 | reg += NV_PRMVIO_SIZE; |
| 191 | 191 | ||
| 192 | nvif_wr08(device, reg, value); | 192 | nvif_wr08(device, reg, value); |
| @@ -194,14 +194,14 @@ static inline void NVWritePRMVIO(struct drm_device *dev, | |||
| 194 | 194 | ||
| 195 | static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable) | 195 | static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable) |
| 196 | { | 196 | { |
| 197 | struct nvif_object *device = &nouveau_drm(dev)->device.object; | 197 | struct nvif_object *device = &nouveau_drm(dev)->client.device.object; |
| 198 | nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); | 198 | nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); |
| 199 | nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20); | 199 | nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20); |
| 200 | } | 200 | } |
| 201 | 201 | ||
| 202 | static inline bool NVGetEnablePalette(struct drm_device *dev, int head) | 202 | static inline bool NVGetEnablePalette(struct drm_device *dev, int head) |
| 203 | { | 203 | { |
| 204 | struct nvif_object *device = &nouveau_drm(dev)->device.object; | 204 | struct nvif_object *device = &nouveau_drm(dev)->client.device.object; |
| 205 | nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); | 205 | nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); |
| 206 | return !(nvif_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20); | 206 | return !(nvif_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20); |
| 207 | } | 207 | } |
| @@ -209,7 +209,7 @@ static inline bool NVGetEnablePalette(struct drm_device *dev, int head) | |||
| 209 | static inline void NVWriteVgaAttr(struct drm_device *dev, | 209 | static inline void NVWriteVgaAttr(struct drm_device *dev, |
| 210 | int head, uint8_t index, uint8_t value) | 210 | int head, uint8_t index, uint8_t value) |
| 211 | { | 211 | { |
| 212 | struct nvif_object *device = &nouveau_drm(dev)->device.object; | 212 | struct nvif_object *device = &nouveau_drm(dev)->client.device.object; |
| 213 | if (NVGetEnablePalette(dev, head)) | 213 | if (NVGetEnablePalette(dev, head)) |
| 214 | index &= ~0x20; | 214 | index &= ~0x20; |
| 215 | else | 215 | else |
| @@ -223,7 +223,7 @@ static inline void NVWriteVgaAttr(struct drm_device *dev, | |||
| 223 | static inline uint8_t NVReadVgaAttr(struct drm_device *dev, | 223 | static inline uint8_t NVReadVgaAttr(struct drm_device *dev, |
| 224 | int head, uint8_t index) | 224 | int head, uint8_t index) |
| 225 | { | 225 | { |
| 226 | struct nvif_object *device = &nouveau_drm(dev)->device.object; | 226 | struct nvif_object *device = &nouveau_drm(dev)->client.device.object; |
| 227 | uint8_t val; | 227 | uint8_t val; |
| 228 | if (NVGetEnablePalette(dev, head)) | 228 | if (NVGetEnablePalette(dev, head)) |
| 229 | index &= ~0x20; | 229 | index &= ~0x20; |
| @@ -259,10 +259,10 @@ static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect) | |||
| 259 | static inline bool | 259 | static inline bool |
| 260 | nv_heads_tied(struct drm_device *dev) | 260 | nv_heads_tied(struct drm_device *dev) |
| 261 | { | 261 | { |
| 262 | struct nvif_object *device = &nouveau_drm(dev)->device.object; | 262 | struct nvif_object *device = &nouveau_drm(dev)->client.device.object; |
| 263 | struct nouveau_drm *drm = nouveau_drm(dev); | 263 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 264 | 264 | ||
| 265 | if (drm->device.info.chipset == 0x11) | 265 | if (drm->client.device.info.chipset == 0x11) |
| 266 | return !!(nvif_rd32(device, NV_PBUS_DEBUG_1) & (1 << 28)); | 266 | return !!(nvif_rd32(device, NV_PBUS_DEBUG_1) & (1 << 28)); |
| 267 | 267 | ||
| 268 | return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4; | 268 | return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4; |
| @@ -318,7 +318,7 @@ NVLockVgaCrtcs(struct drm_device *dev, bool lock) | |||
| 318 | NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX, | 318 | NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX, |
| 319 | lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE); | 319 | lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE); |
| 320 | /* NV11 has independently lockable extended crtcs, except when tied */ | 320 | /* NV11 has independently lockable extended crtcs, except when tied */ |
| 321 | if (drm->device.info.chipset == 0x11 && !nv_heads_tied(dev)) | 321 | if (drm->client.device.info.chipset == 0x11 && !nv_heads_tied(dev)) |
| 322 | NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX, | 322 | NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX, |
| 323 | lock ? NV_CIO_SR_LOCK_VALUE : | 323 | lock ? NV_CIO_SR_LOCK_VALUE : |
| 324 | NV_CIO_SR_UNLOCK_RW_VALUE); | 324 | NV_CIO_SR_UNLOCK_RW_VALUE); |
| @@ -335,7 +335,7 @@ static inline int nv_cursor_width(struct drm_device *dev) | |||
| 335 | { | 335 | { |
| 336 | struct nouveau_drm *drm = nouveau_drm(dev); | 336 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 337 | 337 | ||
| 338 | return drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE; | 338 | return drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE; |
| 339 | } | 339 | } |
| 340 | 340 | ||
| 341 | static inline void | 341 | static inline void |
| @@ -357,7 +357,7 @@ nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset) | |||
| 357 | 357 | ||
| 358 | NVWriteCRTC(dev, head, NV_PCRTC_START, offset); | 358 | NVWriteCRTC(dev, head, NV_PCRTC_START, offset); |
| 359 | 359 | ||
| 360 | if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT) { | 360 | if (drm->client.device.info.family == NV_DEVICE_INFO_V0_TNT) { |
| 361 | /* | 361 | /* |
| 362 | * Hilarious, the 24th bit doesn't want to stick to | 362 | * Hilarious, the 24th bit doesn't want to stick to |
| 363 | * PCRTC_START... | 363 | * PCRTC_START... |
| @@ -382,7 +382,7 @@ nv_show_cursor(struct drm_device *dev, int head, bool show) | |||
| 382 | *curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE); | 382 | *curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE); |
| 383 | NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1); | 383 | NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1); |
| 384 | 384 | ||
| 385 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) | 385 | if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) |
| 386 | nv_fix_nv40_hw_cursor(dev, head); | 386 | nv_fix_nv40_hw_cursor(dev, head); |
| 387 | } | 387 | } |
| 388 | 388 | ||
| @@ -398,7 +398,7 @@ nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp) | |||
| 398 | bpp = 8; | 398 | bpp = 8; |
| 399 | 399 | ||
| 400 | /* Alignment requirements taken from the Haiku driver */ | 400 | /* Alignment requirements taken from the Haiku driver */ |
| 401 | if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT) | 401 | if (drm->client.device.info.family == NV_DEVICE_INFO_V0_TNT) |
| 402 | mask = 128 / bpp - 1; | 402 | mask = 128 / bpp - 1; |
| 403 | else | 403 | else |
| 404 | mask = 512 / bpp - 1; | 404 | mask = 512 / bpp - 1; |
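Editor's note: every hunk in these dispnv04 files makes the same mechanical substitution — the nvif_device that used to live directly in struct nouveau_drm is now reached through the DRM's master client. A minimal sketch of the new accessor idiom, assuming only the renamed field shown in this diff (the helper name is hypothetical):

    /* Sketch of the relocation applied throughout this series: the
     * device, and hence its info/chipset data, now hangs off the
     * master client embedded in struct nouveau_drm. */
    static inline struct nvif_device *
    hypothetical_device(struct drm_device *dev)
    {
            struct nouveau_drm *drm = nouveau_drm(dev);

            return &drm->client.device;     /* previously &drm->device */
    }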
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c index 6275c270df25..5319f2a7f24d 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c +++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c | |||
| @@ -97,7 +97,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 97 | uint32_t src_w, uint32_t src_h) | 97 | uint32_t src_w, uint32_t src_h) |
| 98 | { | 98 | { |
| 99 | struct nouveau_drm *drm = nouveau_drm(plane->dev); | 99 | struct nouveau_drm *drm = nouveau_drm(plane->dev); |
| 100 | struct nvif_object *dev = &drm->device.object; | 100 | struct nvif_object *dev = &drm->client.device.object; |
| 101 | struct nouveau_plane *nv_plane = | 101 | struct nouveau_plane *nv_plane = |
| 102 | container_of(plane, struct nouveau_plane, base); | 102 | container_of(plane, struct nouveau_plane, base); |
| 103 | struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); | 103 | struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); |
| @@ -119,7 +119,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 119 | if (format > 0xffff) | 119 | if (format > 0xffff) |
| 120 | return -ERANGE; | 120 | return -ERANGE; |
| 121 | 121 | ||
| 122 | if (drm->device.info.chipset >= 0x30) { | 122 | if (drm->client.device.info.chipset >= 0x30) { |
| 123 | if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1)) | 123 | if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1)) |
| 124 | return -ERANGE; | 124 | return -ERANGE; |
| 125 | } else { | 125 | } else { |
| @@ -174,7 +174,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 174 | static int | 174 | static int |
| 175 | nv10_disable_plane(struct drm_plane *plane) | 175 | nv10_disable_plane(struct drm_plane *plane) |
| 176 | { | 176 | { |
| 177 | struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object; | 177 | struct nvif_object *dev = &nouveau_drm(plane->dev)->client.device.object; |
| 178 | struct nouveau_plane *nv_plane = | 178 | struct nouveau_plane *nv_plane = |
| 179 | container_of(plane, struct nouveau_plane, base); | 179 | container_of(plane, struct nouveau_plane, base); |
| 180 | 180 | ||
| @@ -198,7 +198,7 @@ nv_destroy_plane(struct drm_plane *plane) | |||
| 198 | static void | 198 | static void |
| 199 | nv10_set_params(struct nouveau_plane *plane) | 199 | nv10_set_params(struct nouveau_plane *plane) |
| 200 | { | 200 | { |
| 201 | struct nvif_object *dev = &nouveau_drm(plane->base.dev)->device.object; | 201 | struct nvif_object *dev = &nouveau_drm(plane->base.dev)->client.device.object; |
| 202 | u32 luma = (plane->brightness - 512) << 16 | plane->contrast; | 202 | u32 luma = (plane->brightness - 512) << 16 | plane->contrast; |
| 203 | u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) | | 203 | u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) | |
| 204 | (cos_mul(plane->hue, plane->saturation) & 0xffff); | 204 | (cos_mul(plane->hue, plane->saturation) & 0xffff); |
| @@ -268,7 +268,7 @@ nv10_overlay_init(struct drm_device *device) | |||
| 268 | if (!plane) | 268 | if (!plane) |
| 269 | return; | 269 | return; |
| 270 | 270 | ||
| 271 | switch (drm->device.info.chipset) { | 271 | switch (drm->client.device.info.chipset) { |
| 272 | case 0x10: | 272 | case 0x10: |
| 273 | case 0x11: | 273 | case 0x11: |
| 274 | case 0x15: | 274 | case 0x15: |
| @@ -347,7 +347,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 347 | uint32_t src_x, uint32_t src_y, | 347 | uint32_t src_x, uint32_t src_y, |
| 348 | uint32_t src_w, uint32_t src_h) | 348 | uint32_t src_w, uint32_t src_h) |
| 349 | { | 349 | { |
| 350 | struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object; | 350 | struct nvif_object *dev = &nouveau_drm(plane->dev)->client.device.object; |
| 351 | struct nouveau_plane *nv_plane = | 351 | struct nouveau_plane *nv_plane = |
| 352 | container_of(plane, struct nouveau_plane, base); | 352 | container_of(plane, struct nouveau_plane, base); |
| 353 | struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); | 353 | struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); |
| @@ -427,7 +427,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 427 | static int | 427 | static int |
| 428 | nv04_disable_plane(struct drm_plane *plane) | 428 | nv04_disable_plane(struct drm_plane *plane) |
| 429 | { | 429 | { |
| 430 | struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object; | 430 | struct nvif_object *dev = &nouveau_drm(plane->dev)->client.device.object; |
| 431 | struct nouveau_plane *nv_plane = | 431 | struct nouveau_plane *nv_plane = |
| 432 | container_of(plane, struct nouveau_plane, base); | 432 | container_of(plane, struct nouveau_plane, base); |
| 433 | 433 | ||
| @@ -495,7 +495,7 @@ err: | |||
| 495 | void | 495 | void |
| 496 | nouveau_overlay_init(struct drm_device *device) | 496 | nouveau_overlay_init(struct drm_device *device) |
| 497 | { | 497 | { |
| 498 | struct nvif_device *dev = &nouveau_drm(device)->device; | 498 | struct nvif_device *dev = &nouveau_drm(device)->client.device; |
| 499 | if (dev->info.chipset < 0x10) | 499 | if (dev->info.chipset < 0x10) |
| 500 | nv04_overlay_init(device); | 500 | nv04_overlay_init(device); |
| 501 | else if (dev->info.chipset <= 0x40) | 501 | else if (dev->info.chipset <= 0x40) |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c index 477a8d072af4..01664357d3e1 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c | |||
| @@ -54,7 +54,7 @@ static struct nvkm_i2c_bus_probe nv04_tv_encoder_info[] = { | |||
| 54 | int nv04_tv_identify(struct drm_device *dev, int i2c_index) | 54 | int nv04_tv_identify(struct drm_device *dev, int i2c_index) |
| 55 | { | 55 | { |
| 56 | struct nouveau_drm *drm = nouveau_drm(dev); | 56 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 57 | struct nvkm_i2c *i2c = nvxx_i2c(&drm->device); | 57 | struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device); |
| 58 | struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, i2c_index); | 58 | struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, i2c_index); |
| 59 | if (bus) { | 59 | if (bus) { |
| 60 | return nvkm_i2c_bus_probe(bus, "TV encoder", | 60 | return nvkm_i2c_bus_probe(bus, "TV encoder", |
| @@ -206,7 +206,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry) | |||
| 206 | struct drm_encoder *encoder; | 206 | struct drm_encoder *encoder; |
| 207 | struct drm_device *dev = connector->dev; | 207 | struct drm_device *dev = connector->dev; |
| 208 | struct nouveau_drm *drm = nouveau_drm(dev); | 208 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 209 | struct nvkm_i2c *i2c = nvxx_i2c(&drm->device); | 209 | struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device); |
| 210 | struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, entry->i2c_index); | 210 | struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, entry->i2c_index); |
| 211 | int type, ret; | 211 | int type, ret; |
| 212 | 212 | ||
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c index 434d1e29f279..6d99f11fee4e 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c | |||
| @@ -46,7 +46,7 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder) | |||
| 46 | { | 46 | { |
| 47 | struct drm_device *dev = encoder->dev; | 47 | struct drm_device *dev = encoder->dev; |
| 48 | struct nouveau_drm *drm = nouveau_drm(dev); | 48 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 49 | struct nvkm_gpio *gpio = nvxx_gpio(&drm->device); | 49 | struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device); |
| 50 | uint32_t testval, regoffset = nv04_dac_output_offset(encoder); | 50 | uint32_t testval, regoffset = nv04_dac_output_offset(encoder); |
| 51 | uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end, | 51 | uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end, |
| 52 | fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c; | 52 | fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c; |
| @@ -130,7 +130,7 @@ static bool | |||
| 130 | get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask) | 130 | get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask) |
| 131 | { | 131 | { |
| 132 | struct nouveau_drm *drm = nouveau_drm(dev); | 132 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 133 | struct nvkm_device *device = nvxx_device(&drm->device); | 133 | struct nvkm_device *device = nvxx_device(&drm->client.device); |
| 134 | 134 | ||
| 135 | if (device->quirk && device->quirk->tv_pin_mask) { | 135 | if (device->quirk && device->quirk->tv_pin_mask) { |
| 136 | *pin_mask = device->quirk->tv_pin_mask; | 136 | *pin_mask = device->quirk->tv_pin_mask; |
| @@ -154,8 +154,8 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) | |||
| 154 | return connector_status_disconnected; | 154 | return connector_status_disconnected; |
| 155 | 155 | ||
| 156 | if (reliable) { | 156 | if (reliable) { |
| 157 | if (drm->device.info.chipset == 0x42 || | 157 | if (drm->client.device.info.chipset == 0x42 || |
| 158 | drm->device.info.chipset == 0x43) | 158 | drm->client.device.info.chipset == 0x43) |
| 159 | tv_enc->pin_mask = | 159 | tv_enc->pin_mask = |
| 160 | nv42_tv_sample_load(encoder) >> 28 & 0xe; | 160 | nv42_tv_sample_load(encoder) >> 28 & 0xe; |
| 161 | else | 161 | else |
| @@ -362,7 +362,7 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode) | |||
| 362 | { | 362 | { |
| 363 | struct drm_device *dev = encoder->dev; | 363 | struct drm_device *dev = encoder->dev; |
| 364 | struct nouveau_drm *drm = nouveau_drm(dev); | 364 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 365 | struct nvkm_gpio *gpio = nvxx_gpio(&drm->device); | 365 | struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device); |
| 366 | struct nv17_tv_state *regs = &to_tv_enc(encoder)->state; | 366 | struct nv17_tv_state *regs = &to_tv_enc(encoder)->state; |
| 367 | struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); | 367 | struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); |
| 368 | 368 | ||
| @@ -435,7 +435,7 @@ static void nv17_tv_prepare(struct drm_encoder *encoder) | |||
| 435 | /* Set the DACCLK register */ | 435 | /* Set the DACCLK register */ |
| 436 | dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1; | 436 | dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1; |
| 437 | 437 | ||
| 438 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) | 438 | if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) |
| 439 | dacclk |= 0x1a << 16; | 439 | dacclk |= 0x1a << 16; |
| 440 | 440 | ||
| 441 | if (tv_norm->kind == CTV_ENC_MODE) { | 441 | if (tv_norm->kind == CTV_ENC_MODE) { |
| @@ -492,7 +492,7 @@ static void nv17_tv_mode_set(struct drm_encoder *encoder, | |||
| 492 | tv_regs->ptv_614 = 0x13; | 492 | tv_regs->ptv_614 = 0x13; |
| 493 | } | 493 | } |
| 494 | 494 | ||
| 495 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) { | 495 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE) { |
| 496 | tv_regs->ptv_500 = 0xe8e0; | 496 | tv_regs->ptv_500 = 0xe8e0; |
| 497 | tv_regs->ptv_504 = 0x1710; | 497 | tv_regs->ptv_504 = 0x1710; |
| 498 | tv_regs->ptv_604 = 0x0; | 498 | tv_regs->ptv_604 = 0x0; |
| @@ -587,7 +587,7 @@ static void nv17_tv_commit(struct drm_encoder *encoder) | |||
| 587 | nv17_tv_state_load(dev, &to_tv_enc(encoder)->state); | 587 | nv17_tv_state_load(dev, &to_tv_enc(encoder)->state); |
| 588 | 588 | ||
| 589 | /* This could use refinement for flatpanels, but it should work */ | 589 | /* This could use refinement for flatpanels, but it should work */ |
| 590 | if (drm->device.info.chipset < 0x44) | 590 | if (drm->client.device.info.chipset < 0x44) |
| 591 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + | 591 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + |
| 592 | nv04_dac_output_offset(encoder), | 592 | nv04_dac_output_offset(encoder), |
| 593 | 0xf0000000); | 593 | 0xf0000000); |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h index 1b07521cde0d..29773b325bd9 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h | |||
| @@ -130,13 +130,13 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder); | |||
| 130 | static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg, | 130 | static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg, |
| 131 | uint32_t val) | 131 | uint32_t val) |
| 132 | { | 132 | { |
| 133 | struct nvif_device *device = &nouveau_drm(dev)->device; | 133 | struct nvif_device *device = &nouveau_drm(dev)->client.device; |
| 134 | nvif_wr32(&device->object, reg, val); | 134 | nvif_wr32(&device->object, reg, val); |
| 135 | } | 135 | } |
| 136 | 136 | ||
| 137 | static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg) | 137 | static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg) |
| 138 | { | 138 | { |
| 139 | struct nvif_device *device = &nouveau_drm(dev)->device; | 139 | struct nvif_device *device = &nouveau_drm(dev)->client.device; |
| 140 | return nvif_rd32(&device->object, reg); | 140 | return nvif_rd32(&device->object, reg); |
| 141 | } | 141 | } |
| 142 | 142 | ||
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl826e.h b/drivers/gpu/drm/nouveau/include/nvif/cl826e.h index 05e6ef7cd190..91e33db21a2f 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/cl826e.h +++ b/drivers/gpu/drm/nouveau/include/nvif/cl826e.h | |||
| @@ -10,5 +10,5 @@ struct g82_channel_dma_v0 { | |||
| 10 | __u64 offset; | 10 | __u64 offset; |
| 11 | }; | 11 | }; |
| 12 | 12 | ||
| 13 | #define G82_CHANNEL_DMA_V0_NTFY_UEVENT 0x00 | 13 | #define NV826E_V0_NTFY_NON_STALL_INTERRUPT 0x00 |
| 14 | #endif | 14 | #endif |
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl826f.h b/drivers/gpu/drm/nouveau/include/nvif/cl826f.h index cecafcb1e954..e34efd4ec537 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/cl826f.h +++ b/drivers/gpu/drm/nouveau/include/nvif/cl826f.h | |||
| @@ -11,5 +11,5 @@ struct g82_channel_gpfifo_v0 { | |||
| 11 | __u64 vm; | 11 | __u64 vm; |
| 12 | }; | 12 | }; |
| 13 | 13 | ||
| 14 | #define G82_CHANNEL_GPFIFO_V0_NTFY_UEVENT 0x00 | 14 | #define NV826F_V0_NTFY_NON_STALL_INTERRUPT 0x00 |
| 15 | #endif | 15 | #endif |
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl906f.h b/drivers/gpu/drm/nouveau/include/nvif/cl906f.h index 2caf0838fcfd..a2d5410a491b 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/cl906f.h +++ b/drivers/gpu/drm/nouveau/include/nvif/cl906f.h | |||
| @@ -10,5 +10,6 @@ struct fermi_channel_gpfifo_v0 { | |||
| 10 | __u64 vm; | 10 | __u64 vm; |
| 11 | }; | 11 | }; |
| 12 | 12 | ||
| 13 | #define FERMI_CHANNEL_GPFIFO_V0_NTFY_UEVENT 0x00 | 13 | #define NV906F_V0_NTFY_NON_STALL_INTERRUPT 0x00 |
| 14 | #define NV906F_V0_NTFY_KILLED 0x01 | ||
| 14 | #endif | 15 | #endif |
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h index 46301ec018ce..2efa3d048bb9 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h +++ b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h | |||
| @@ -25,5 +25,6 @@ struct kepler_channel_gpfifo_a_v0 { | |||
| 25 | __u64 vm; | 25 | __u64 vm; |
| 26 | }; | 26 | }; |
| 27 | 27 | ||
| 28 | #define NVA06F_V0_NTFY_UEVENT 0x00 | 28 | #define NVA06F_V0_NTFY_NON_STALL_INTERRUPT 0x00 |
| 29 | #define NVA06F_V0_NTFY_KILLED 0x01 | ||
| 29 | #endif | 30 | #endif |
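Editor's note: these four header hunks retire the per-class *_NTFY_UEVENT names in favour of a uniform *_NTFY_NON_STALL_INTERRUPT, and the Fermi/Kepler GPFIFO classes gain a second index, *_NTFY_KILLED, matching the kevent source added to struct nvkm_fifo further down. A hedged sketch of a consumer; nvif_notify_init() and its argument order are assumed from the existing nvif notify code, not shown in this diff, and chan->user/chan->kill are hypothetical fields:

    /* Hypothetical consumer: request the new channel-killed event on a
     * Kepler GPFIFO channel.  killed_handler is an
     * int (*)(struct nvif_notify *) callback. */
    ret = nvif_notify_init(&chan->user, killed_handler, true,
                           NVA06F_V0_NTFY_KILLED, NULL, 0, 0, &chan->kill);
    if (ret == 0)
            ret = nvif_notify_get(&chan->kill);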
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h index 82235f30277c..3a2c0137d4b4 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/class.h +++ b/drivers/gpu/drm/nouveau/include/nvif/class.h | |||
| @@ -2,23 +2,31 @@ | |||
| 2 | #define __NVIF_CLASS_H__ | 2 | #define __NVIF_CLASS_H__ |
| 3 | 3 | ||
| 4 | /* these class numbers are made up by us, and not nvidia-assigned */ | 4 | /* these class numbers are made up by us, and not nvidia-assigned */ |
| 5 | #define NVIF_CLASS_CONTROL /* if0001.h */ -1 | 5 | #define NVIF_CLASS_CLIENT /* if0000.h */ -0x00000000 |
| 6 | #define NVIF_CLASS_PERFMON /* if0002.h */ -2 | 6 | |
| 7 | #define NVIF_CLASS_PERFDOM /* if0003.h */ -3 | 7 | #define NVIF_CLASS_CONTROL /* if0001.h */ -0x00000001 |
| 8 | #define NVIF_CLASS_SW_NV04 /* if0004.h */ -4 | 8 | |
| 9 | #define NVIF_CLASS_SW_NV10 /* if0005.h */ -5 | 9 | #define NVIF_CLASS_PERFMON /* if0002.h */ -0x00000002 |
| 10 | #define NVIF_CLASS_SW_NV50 /* if0005.h */ -6 | 10 | #define NVIF_CLASS_PERFDOM /* if0003.h */ -0x00000003 |
| 11 | #define NVIF_CLASS_SW_GF100 /* if0005.h */ -7 | 11 | |
| 12 | #define NVIF_CLASS_SW_NV04 /* if0004.h */ -0x00000004 | ||
| 13 | #define NVIF_CLASS_SW_NV10 /* if0005.h */ -0x00000005 | ||
| 14 | #define NVIF_CLASS_SW_NV50 /* if0005.h */ -0x00000006 | ||
| 15 | #define NVIF_CLASS_SW_GF100 /* if0005.h */ -0x00000007 | ||
| 12 | 16 | ||
| 13 | /* the below match nvidia-assigned (either in hw, or sw) class numbers */ | 17 | /* the below match nvidia-assigned (either in hw, or sw) class numbers */ |
| 18 | #define NV_NULL_CLASS 0x00000030 | ||
| 19 | |||
| 14 | #define NV_DEVICE /* cl0080.h */ 0x00000080 | 20 | #define NV_DEVICE /* cl0080.h */ 0x00000080 |
| 15 | 21 | ||
| 16 | #define NV_DMA_FROM_MEMORY /* cl0002.h */ 0x00000002 | 22 | #define NV_DMA_FROM_MEMORY /* cl0002.h */ 0x00000002 |
| 17 | #define NV_DMA_TO_MEMORY /* cl0002.h */ 0x00000003 | 23 | #define NV_DMA_TO_MEMORY /* cl0002.h */ 0x00000003 |
| 18 | #define NV_DMA_IN_MEMORY /* cl0002.h */ 0x0000003d | 24 | #define NV_DMA_IN_MEMORY /* cl0002.h */ 0x0000003d |
| 19 | 25 | ||
| 26 | #define NV50_TWOD 0x0000502d | ||
| 20 | #define FERMI_TWOD_A 0x0000902d | 27 | #define FERMI_TWOD_A 0x0000902d |
| 21 | 28 | ||
| 29 | #define NV50_MEMORY_TO_MEMORY_FORMAT 0x00005039 | ||
| 22 | #define FERMI_MEMORY_TO_MEMORY_FORMAT_A 0x00009039 | 30 | #define FERMI_MEMORY_TO_MEMORY_FORMAT_A 0x00009039 |
| 23 | 31 | ||
| 24 | #define KEPLER_INLINE_TO_MEMORY_A 0x0000a040 | 32 | #define KEPLER_INLINE_TO_MEMORY_A 0x0000a040 |
| @@ -99,6 +107,12 @@ | |||
| 99 | #define GF110_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000907e | 107 | #define GF110_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000907e |
| 100 | #define GK104_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000917e | 108 | #define GK104_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000917e |
| 101 | 109 | ||
| 110 | #define NV50_TESLA 0x00005097 | ||
| 111 | #define G82_TESLA 0x00008297 | ||
| 112 | #define GT200_TESLA 0x00008397 | ||
| 113 | #define GT214_TESLA 0x00008597 | ||
| 114 | #define GT21A_TESLA 0x00008697 | ||
| 115 | |||
| 102 | #define FERMI_A /* cl9097.h */ 0x00009097 | 116 | #define FERMI_A /* cl9097.h */ 0x00009097 |
| 103 | #define FERMI_B /* cl9097.h */ 0x00009197 | 117 | #define FERMI_B /* cl9097.h */ 0x00009197 |
| 104 | #define FERMI_C /* cl9097.h */ 0x00009297 | 118 | #define FERMI_C /* cl9097.h */ 0x00009297 |
| @@ -140,6 +154,8 @@ | |||
| 140 | 154 | ||
| 141 | #define FERMI_DECOMPRESS 0x000090b8 | 155 | #define FERMI_DECOMPRESS 0x000090b8 |
| 142 | 156 | ||
| 157 | #define NV50_COMPUTE 0x000050c0 | ||
| 158 | #define GT214_COMPUTE 0x000085c0 | ||
| 143 | #define FERMI_COMPUTE_A 0x000090c0 | 159 | #define FERMI_COMPUTE_A 0x000090c0 |
| 144 | #define FERMI_COMPUTE_B 0x000091c0 | 160 | #define FERMI_COMPUTE_B 0x000091c0 |
| 145 | #define KEPLER_COMPUTE_A 0x0000a0c0 | 161 | #define KEPLER_COMPUTE_A 0x0000a0c0 |
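Editor's note: the made-up class numbers are rewritten in full 32-bit negative notation so new entries such as NVIF_CLASS_CLIENT slot in without colliding with the NVIDIA-assigned positive space, and several pre-Fermi classes (NV50_TWOD, NV50_TESLA, NV50_COMPUTE, ...) are now named. Purely as illustration, under the assumption that callers select a class by device family as elsewhere in nouveau:

    /* Illustrative class pick, not from this commit: prefer the newly
     * exported NV50 2D class on pre-Fermi hardware. */
    s32 oclass = device->info.family < NV_DEVICE_INFO_V0_FERMI
               ? NV50_TWOD : FERMI_TWOD_A;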
diff --git a/drivers/gpu/drm/nouveau/include/nvif/client.h b/drivers/gpu/drm/nouveau/include/nvif/client.h index 4a7f6f7b836d..b52a8eadce01 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/client.h +++ b/drivers/gpu/drm/nouveau/include/nvif/client.h | |||
| @@ -11,8 +11,7 @@ struct nvif_client { | |||
| 11 | bool super; | 11 | bool super; |
| 12 | }; | 12 | }; |
| 13 | 13 | ||
| 14 | int nvif_client_init(const char *drv, const char *name, u64 device, | 14 | int nvif_client_init(struct nvif_client *parent, const char *name, u64 device, |
| 15 | const char *cfg, const char *dbg, | ||
| 16 | struct nvif_client *); | 15 | struct nvif_client *); |
| 17 | void nvif_client_fini(struct nvif_client *); | 16 | void nvif_client_fini(struct nvif_client *); |
| 18 | int nvif_client_ioctl(struct nvif_client *, void *, u32); | 17 | int nvif_client_ioctl(struct nvif_client *, void *, u32); |
diff --git a/drivers/gpu/drm/nouveau/include/nvif/driver.h b/drivers/gpu/drm/nouveau/include/nvif/driver.h index 8bd39e69229c..0c6f48d8140a 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/driver.h +++ b/drivers/gpu/drm/nouveau/include/nvif/driver.h | |||
| @@ -1,5 +1,7 @@ | |||
| 1 | #ifndef __NVIF_DRIVER_H__ | 1 | #ifndef __NVIF_DRIVER_H__ |
| 2 | #define __NVIF_DRIVER_H__ | 2 | #define __NVIF_DRIVER_H__ |
| 3 | #include <nvif/os.h> | ||
| 4 | struct nvif_client; | ||
| 3 | 5 | ||
| 4 | struct nvif_driver { | 6 | struct nvif_driver { |
| 5 | const char *name; | 7 | const char *name; |
| @@ -14,9 +16,11 @@ struct nvif_driver { | |||
| 14 | bool keep; | 16 | bool keep; |
| 15 | }; | 17 | }; |
| 16 | 18 | ||
| 19 | int nvif_driver_init(const char *drv, const char *cfg, const char *dbg, | ||
| 20 | const char *name, u64 device, struct nvif_client *); | ||
| 21 | |||
| 17 | extern const struct nvif_driver nvif_driver_nvkm; | 22 | extern const struct nvif_driver nvif_driver_nvkm; |
| 18 | extern const struct nvif_driver nvif_driver_drm; | 23 | extern const struct nvif_driver nvif_driver_drm; |
| 19 | extern const struct nvif_driver nvif_driver_lib; | 24 | extern const struct nvif_driver nvif_driver_lib; |
| 20 | extern const struct nvif_driver nvif_driver_null; | 25 | extern const struct nvif_driver nvif_driver_null; |
| 21 | |||
| 22 | #endif | 26 | #endif |
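Editor's note: taken together with the nvif_client_init() change above, this moves driver selection out of client creation — nvif_driver_init() binds the driver, configuration, and debug strings once for a root client, and further clients are created as children of it. A sketch under that reading, built only from the two prototypes in these hunks (the string values are placeholders):

    /* Sketch: one driver-bound master client, then a child client.
     * Error handling trimmed. */
    struct nvif_client master, child;
    int ret;

    ret = nvif_driver_init("drm", cfg, dbg, "master", ~0ULL, &master);
    if (ret == 0)
            ret = nvif_client_init(&master, "child", ~0ULL, &child);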
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0000.h b/drivers/gpu/drm/nouveau/include/nvif/if0000.h index 85c44e8a1201..c2c0fc41e017 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/if0000.h +++ b/drivers/gpu/drm/nouveau/include/nvif/if0000.h | |||
| @@ -1,9 +1,16 @@ | |||
| 1 | #ifndef __NVIF_IF0000_H__ | 1 | #ifndef __NVIF_IF0000_H__ |
| 2 | #define __NVIF_IF0000_H__ | 2 | #define __NVIF_IF0000_H__ |
| 3 | 3 | ||
| 4 | #define NV_CLIENT_DEVLIST 0x00 | 4 | struct nvif_client_v0 { |
| 5 | __u8 version; | ||
| 6 | __u8 pad01[7]; | ||
| 7 | __u64 device; | ||
| 8 | char name[32]; | ||
| 9 | }; | ||
| 10 | |||
| 11 | #define NVIF_CLIENT_V0_DEVLIST 0x00 | ||
| 5 | 12 | ||
| 6 | struct nv_client_devlist_v0 { | 13 | struct nvif_client_devlist_v0 { |
| 7 | __u8 version; | 14 | __u8 version; |
| 8 | __u8 count; | 15 | __u8 count; |
| 9 | __u8 pad02[6]; | 16 | __u8 pad02[6]; |
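Editor's note: the client itself becomes a versioned object — struct nvif_client_v0 carries the target device and a name, and the old NV_CLIENT_* names take the NVIF_CLIENT_* prefix. A hypothetical argument block following the layout above; treating device ~0 as "any device" is an assumption, not stated in this diff:

    /* Hypothetical: arguments for creating a client object with the
     * new v0 structure; padding left zero-initialised. */
    struct nvif_client_v0 args = {
            .device = ~0ULL,        /* assumed: no specific device */
    };

    strncpy(args.name, "example", sizeof(args.name) - 1);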
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h index eaf5905a87a3..e876634da10a 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h | |||
| @@ -1,5 +1,6 @@ | |||
| 1 | #ifndef __NVKM_CLIENT_H__ | 1 | #ifndef __NVKM_CLIENT_H__ |
| 2 | #define __NVKM_CLIENT_H__ | 2 | #define __NVKM_CLIENT_H__ |
| 3 | #define nvkm_client(p) container_of((p), struct nvkm_client, object) | ||
| 3 | #include <core/object.h> | 4 | #include <core/object.h> |
| 4 | 5 | ||
| 5 | struct nvkm_client { | 6 | struct nvkm_client { |
| @@ -8,9 +9,8 @@ struct nvkm_client { | |||
| 8 | u64 device; | 9 | u64 device; |
| 9 | u32 debug; | 10 | u32 debug; |
| 10 | 11 | ||
| 11 | struct nvkm_client_notify *notify[16]; | 12 | struct nvkm_client_notify *notify[32]; |
| 12 | struct rb_root objroot; | 13 | struct rb_root objroot; |
| 13 | struct rb_root dmaroot; | ||
| 14 | 14 | ||
| 15 | bool super; | 15 | bool super; |
| 16 | void *data; | 16 | void *data; |
| @@ -19,15 +19,11 @@ struct nvkm_client { | |||
| 19 | struct nvkm_vm *vm; | 19 | struct nvkm_vm *vm; |
| 20 | }; | 20 | }; |
| 21 | 21 | ||
| 22 | bool nvkm_client_insert(struct nvkm_client *, struct nvkm_object *); | ||
| 23 | void nvkm_client_remove(struct nvkm_client *, struct nvkm_object *); | ||
| 24 | struct nvkm_object *nvkm_client_search(struct nvkm_client *, u64 object); | ||
| 25 | |||
| 26 | int nvkm_client_new(const char *name, u64 device, const char *cfg, | 22 | int nvkm_client_new(const char *name, u64 device, const char *cfg, |
| 27 | const char *dbg, struct nvkm_client **); | 23 | const char *dbg, |
| 28 | void nvkm_client_del(struct nvkm_client **); | 24 | int (*)(const void *, u32, const void *, u32), |
| 29 | int nvkm_client_init(struct nvkm_client *); | 25 | struct nvkm_client **); |
| 30 | int nvkm_client_fini(struct nvkm_client *, bool suspend); | 26 | struct nvkm_client *nvkm_client_search(struct nvkm_client *, u64 handle); |
| 31 | 27 | ||
| 32 | int nvkm_client_notify_new(struct nvkm_object *, struct nvkm_event *, | 28 | int nvkm_client_notify_new(struct nvkm_object *, struct nvkm_event *, |
| 33 | void *data, u32 size); | 29 | void *data, u32 size); |
| @@ -37,8 +33,8 @@ int nvkm_client_notify_put(struct nvkm_client *, int index); | |||
| 37 | 33 | ||
| 38 | /* logging for client-facing objects */ | 34 | /* logging for client-facing objects */ |
| 39 | #define nvif_printk(o,l,p,f,a...) do { \ | 35 | #define nvif_printk(o,l,p,f,a...) do { \ |
| 40 | struct nvkm_object *_object = (o); \ | 36 | const struct nvkm_object *_object = (o); \ |
| 41 | struct nvkm_client *_client = _object->client; \ | 37 | const struct nvkm_client *_client = _object->client; \ |
| 42 | if (_client->debug >= NV_DBG_##l) \ | 38 | if (_client->debug >= NV_DBG_##l) \ |
| 43 | printk(KERN_##p "nouveau: %s:%08x:%08x: "f, _client->name, \ | 39 | printk(KERN_##p "nouveau: %s:%08x:%08x: "f, _client->name, \ |
| 44 | _object->handle, _object->oclass, ##a); \ | 40 | _object->handle, _object->oclass, ##a); \ |
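Editor's note: on the nvkm side, the per-client insert/search helpers give way to object-level ones (declared in the core/object.h hunk below), the notify table doubles to 32 slots, and nvkm_client_new() grows a callback through which the core pushes data back to the caller. Its role as the notify delivery route is inferred here, not spelled out in the diff:

    /* Hedged sketch: a do-nothing callback wired into the new
     * nvkm_client_new() signature; argument meaning inferred as
     * (notify data, size, reply buffer, reply size). */
    static int
    example_ntfy(const void *data, u32 size, const void *rep, u32 repc)
    {
            return 0;
    }

    ret = nvkm_client_new("example", ~0ULL, NULL, NULL,
                          example_ntfy, &client);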
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h index 6bc712f32c8b..d426b86e2712 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h | |||
| @@ -262,7 +262,7 @@ extern const struct nvkm_sclass nvkm_udevice_sclass; | |||
| 262 | 262 | ||
| 263 | /* device logging */ | 263 | /* device logging */ |
| 264 | #define nvdev_printk_(d,l,p,f,a...) do { \ | 264 | #define nvdev_printk_(d,l,p,f,a...) do { \ |
| 265 | struct nvkm_device *_device = (d); \ | 265 | const struct nvkm_device *_device = (d); \ |
| 266 | if (_device->debug >= (l)) \ | 266 | if (_device->debug >= (l)) \ |
| 267 | dev_##p(_device->dev, f, ##a); \ | 267 | dev_##p(_device->dev, f, ##a); \ |
| 268 | } while(0) | 268 | } while(0) |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h index 9ebfd8782366..d4cd2fbfde88 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h | |||
| @@ -20,6 +20,7 @@ struct nvkm_engine_func { | |||
| 20 | int (*fini)(struct nvkm_engine *, bool suspend); | 20 | int (*fini)(struct nvkm_engine *, bool suspend); |
| 21 | void (*intr)(struct nvkm_engine *); | 21 | void (*intr)(struct nvkm_engine *); |
| 22 | void (*tile)(struct nvkm_engine *, int region, struct nvkm_fb_tile *); | 22 | void (*tile)(struct nvkm_engine *, int region, struct nvkm_fb_tile *); |
| 23 | bool (*chsw_load)(struct nvkm_engine *); | ||
| 23 | 24 | ||
| 24 | struct { | 25 | struct { |
| 25 | int (*sclass)(struct nvkm_oclass *, int index, | 26 | int (*sclass)(struct nvkm_oclass *, int index, |
| @@ -44,4 +45,5 @@ int nvkm_engine_new_(const struct nvkm_engine_func *, struct nvkm_device *, | |||
| 44 | struct nvkm_engine *nvkm_engine_ref(struct nvkm_engine *); | 45 | struct nvkm_engine *nvkm_engine_ref(struct nvkm_engine *); |
| 45 | void nvkm_engine_unref(struct nvkm_engine **); | 46 | void nvkm_engine_unref(struct nvkm_engine **); |
| 46 | void nvkm_engine_tile(struct nvkm_engine *, int region); | 47 | void nvkm_engine_tile(struct nvkm_engine *, int region); |
| 48 | bool nvkm_engine_chsw_load(struct nvkm_engine *); | ||
| 47 | #endif | 49 | #endif |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h index 9363b839a9da..33ca6769266a 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h | |||
| @@ -6,9 +6,10 @@ struct nvkm_vma; | |||
| 6 | struct nvkm_vm; | 6 | struct nvkm_vm; |
| 7 | 7 | ||
| 8 | enum nvkm_memory_target { | 8 | enum nvkm_memory_target { |
| 9 | NVKM_MEM_TARGET_INST, | 9 | NVKM_MEM_TARGET_INST, /* instance memory */ |
| 10 | NVKM_MEM_TARGET_VRAM, | 10 | NVKM_MEM_TARGET_VRAM, /* video memory */ |
| 11 | NVKM_MEM_TARGET_HOST, | 11 | NVKM_MEM_TARGET_HOST, /* coherent system memory */ |
| 12 | NVKM_MEM_TARGET_NCOH, /* non-coherent system memory */ | ||
| 12 | }; | 13 | }; |
| 13 | 14 | ||
| 14 | struct nvkm_memory { | 15 | struct nvkm_memory { |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h index d92fd41e4056..7bd4897a8a2a 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h | |||
| @@ -5,7 +5,7 @@ | |||
| 5 | struct nvkm_mm_node { | 5 | struct nvkm_mm_node { |
| 6 | struct list_head nl_entry; | 6 | struct list_head nl_entry; |
| 7 | struct list_head fl_entry; | 7 | struct list_head fl_entry; |
| 8 | struct list_head rl_entry; | 8 | struct nvkm_mm_node *next; |
| 9 | 9 | ||
| 10 | #define NVKM_MM_HEAP_ANY 0x00 | 10 | #define NVKM_MM_HEAP_ANY 0x00 |
| 11 | u8 heap; | 11 | u8 heap; |
| @@ -38,4 +38,10 @@ int nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max, | |||
| 38 | u32 size_min, u32 align, struct nvkm_mm_node **); | 38 | u32 size_min, u32 align, struct nvkm_mm_node **); |
| 39 | void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **); | 39 | void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **); |
| 40 | void nvkm_mm_dump(struct nvkm_mm *, const char *); | 40 | void nvkm_mm_dump(struct nvkm_mm *, const char *); |
| 41 | |||
| 42 | static inline bool | ||
| 43 | nvkm_mm_contiguous(struct nvkm_mm_node *node) | ||
| 44 | { | ||
| 45 | return !node->next; | ||
| 46 | } | ||
| 41 | #endif | 47 | #endif |
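Editor's note: rl_entry threaded an allocation's regions onto a list_head inside struct nvkm_mem; that list is replaced by an intrusive next pointer, so an allocation is simply a chain of nodes and nvkm_mm_contiguous() becomes a one-pointer test. The matching nvkm_mem change appears in the subdev/fb.h hunk later in this diff. A sketch of walking such a chain, using the node's existing offset/length fields:

    /* Sketch: total size of a possibly fragmented allocation whose
     * head node is mem->mem (field name per the fb.h hunk below). */
    u64 size = 0;
    struct nvkm_mm_node *node;

    for (node = mem->mem; node; node = node->next)
            size += node->length;

    if (nvkm_mm_contiguous(mem->mem))
            ; /* single node: one range starting at mem->mem->offset */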
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h index dcd048b91fac..96dda350ada3 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h | |||
| @@ -62,6 +62,11 @@ int nvkm_object_wr32(struct nvkm_object *, u64 addr, u32 data); | |||
| 62 | int nvkm_object_bind(struct nvkm_object *, struct nvkm_gpuobj *, int align, | 62 | int nvkm_object_bind(struct nvkm_object *, struct nvkm_gpuobj *, int align, |
| 63 | struct nvkm_gpuobj **); | 63 | struct nvkm_gpuobj **); |
| 64 | 64 | ||
| 65 | bool nvkm_object_insert(struct nvkm_object *); | ||
| 66 | void nvkm_object_remove(struct nvkm_object *); | ||
| 67 | struct nvkm_object *nvkm_object_search(struct nvkm_client *, u64 object, | ||
| 68 | const struct nvkm_object_func *); | ||
| 69 | |||
| 65 | struct nvkm_sclass { | 70 | struct nvkm_sclass { |
| 66 | int minver; | 71 | int minver; |
| 67 | int maxver; | 72 | int maxver; |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h index 57adefa8b08e..ca9ed3d68f44 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h | |||
| @@ -32,7 +32,7 @@ void nvkm_subdev_intr(struct nvkm_subdev *); | |||
| 32 | 32 | ||
| 33 | /* subdev logging */ | 33 | /* subdev logging */ |
| 34 | #define nvkm_printk_(s,l,p,f,a...) do { \ | 34 | #define nvkm_printk_(s,l,p,f,a...) do { \ |
| 35 | struct nvkm_subdev *_subdev = (s); \ | 35 | const struct nvkm_subdev *_subdev = (s); \ |
| 36 | if (_subdev->debug >= (l)) { \ | 36 | if (_subdev->debug >= (l)) { \ |
| 37 | dev_##p(_subdev->device->dev, "%s: "f, \ | 37 | dev_##p(_subdev->device->dev, "%s: "f, \ |
| 38 | nvkm_subdev_name[_subdev->index], ##a); \ | 38 | nvkm_subdev_name[_subdev->index], ##a); \ |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h index 114bfb737a81..d2a6532ce3b9 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h | |||
| @@ -12,9 +12,6 @@ struct nvkm_dmaobj { | |||
| 12 | u32 access; | 12 | u32 access; |
| 13 | u64 start; | 13 | u64 start; |
| 14 | u64 limit; | 14 | u64 limit; |
| 15 | |||
| 16 | struct rb_node rb; | ||
| 17 | u64 handle; /*XXX HANDLE MERGE */ | ||
| 18 | }; | 15 | }; |
| 19 | 16 | ||
| 20 | struct nvkm_dma { | 17 | struct nvkm_dma { |
| @@ -22,8 +19,7 @@ struct nvkm_dma { | |||
| 22 | struct nvkm_engine engine; | 19 | struct nvkm_engine engine; |
| 23 | }; | 20 | }; |
| 24 | 21 | ||
| 25 | struct nvkm_dmaobj * | 22 | struct nvkm_dmaobj *nvkm_dmaobj_search(struct nvkm_client *, u64 object); |
| 26 | nvkm_dma_search(struct nvkm_dma *, struct nvkm_client *, u64 object); | ||
| 27 | 23 | ||
| 28 | int nv04_dma_new(struct nvkm_device *, int, struct nvkm_dma **); | 24 | int nv04_dma_new(struct nvkm_device *, int, struct nvkm_dma **); |
| 29 | int nv50_dma_new(struct nvkm_device *, int, struct nvkm_dma **); | 25 | int nv50_dma_new(struct nvkm_device *, int, struct nvkm_dma **); |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h index e6baf039c269..7e498e65b1e8 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h | |||
| @@ -4,13 +4,26 @@ | |||
| 4 | #include <core/engine.h> | 4 | #include <core/engine.h> |
| 5 | struct nvkm_fifo_chan; | 5 | struct nvkm_fifo_chan; |
| 6 | 6 | ||
| 7 | enum nvkm_falcon_dmaidx { | ||
| 8 | FALCON_DMAIDX_UCODE = 0, | ||
| 9 | FALCON_DMAIDX_VIRT = 1, | ||
| 10 | FALCON_DMAIDX_PHYS_VID = 2, | ||
| 11 | FALCON_DMAIDX_PHYS_SYS_COH = 3, | ||
| 12 | FALCON_DMAIDX_PHYS_SYS_NCOH = 4, | ||
| 13 | }; | ||
| 14 | |||
| 7 | struct nvkm_falcon { | 15 | struct nvkm_falcon { |
| 8 | const struct nvkm_falcon_func *func; | 16 | const struct nvkm_falcon_func *func; |
| 9 | struct nvkm_engine engine; | 17 | const struct nvkm_subdev *owner; |
| 10 | 18 | const char *name; | |
| 11 | u32 addr; | 19 | u32 addr; |
| 12 | u8 version; | 20 | |
| 13 | u8 secret; | 21 | struct mutex mutex; |
| 22 | const struct nvkm_subdev *user; | ||
| 23 | |||
| 24 | u8 version; | ||
| 25 | u8 secret; | ||
| 26 | bool debug; | ||
| 14 | 27 | ||
| 15 | struct nvkm_memory *core; | 28 | struct nvkm_memory *core; |
| 16 | bool external; | 29 | bool external; |
| @@ -19,15 +32,25 @@ struct nvkm_falcon { | |||
| 19 | u32 limit; | 32 | u32 limit; |
| 20 | u32 *data; | 33 | u32 *data; |
| 21 | u32 size; | 34 | u32 size; |
| 35 | u8 ports; | ||
| 22 | } code; | 36 | } code; |
| 23 | 37 | ||
| 24 | struct { | 38 | struct { |
| 25 | u32 limit; | 39 | u32 limit; |
| 26 | u32 *data; | 40 | u32 *data; |
| 27 | u32 size; | 41 | u32 size; |
| 42 | u8 ports; | ||
| 28 | } data; | 43 | } data; |
| 44 | |||
| 45 | struct nvkm_engine engine; | ||
| 29 | }; | 46 | }; |
| 30 | 47 | ||
| 48 | int nvkm_falcon_v1_new(struct nvkm_subdev *owner, const char *name, u32 addr, | ||
| 49 | struct nvkm_falcon **); | ||
| 50 | void nvkm_falcon_del(struct nvkm_falcon **); | ||
| 51 | int nvkm_falcon_get(struct nvkm_falcon *, const struct nvkm_subdev *); | ||
| 52 | void nvkm_falcon_put(struct nvkm_falcon *, const struct nvkm_subdev *); | ||
| 53 | |||
| 31 | int nvkm_falcon_new_(const struct nvkm_falcon_func *, struct nvkm_device *, | 54 | int nvkm_falcon_new_(const struct nvkm_falcon_func *, struct nvkm_device *, |
| 32 | int index, bool enable, u32 addr, struct nvkm_engine **); | 55 | int index, bool enable, u32 addr, struct nvkm_engine **); |
| 33 | 56 | ||
| @@ -42,6 +65,51 @@ struct nvkm_falcon_func { | |||
| 42 | } data; | 65 | } data; |
| 43 | void (*init)(struct nvkm_falcon *); | 66 | void (*init)(struct nvkm_falcon *); |
| 44 | void (*intr)(struct nvkm_falcon *, struct nvkm_fifo_chan *); | 67 | void (*intr)(struct nvkm_falcon *, struct nvkm_fifo_chan *); |
| 68 | void (*load_imem)(struct nvkm_falcon *, void *, u32, u32, u16, u8, bool); | ||
| 69 | void (*load_dmem)(struct nvkm_falcon *, void *, u32, u32, u8); | ||
| 70 | void (*read_dmem)(struct nvkm_falcon *, u32, u32, u8, void *); | ||
| 71 | void (*bind_context)(struct nvkm_falcon *, struct nvkm_gpuobj *); | ||
| 72 | int (*wait_for_halt)(struct nvkm_falcon *, u32); | ||
| 73 | int (*clear_interrupt)(struct nvkm_falcon *, u32); | ||
| 74 | void (*set_start_addr)(struct nvkm_falcon *, u32 start_addr); | ||
| 75 | void (*start)(struct nvkm_falcon *); | ||
| 76 | int (*enable)(struct nvkm_falcon *falcon); | ||
| 77 | void (*disable)(struct nvkm_falcon *falcon); | ||
| 78 | |||
| 45 | struct nvkm_sclass sclass[]; | 79 | struct nvkm_sclass sclass[]; |
| 46 | }; | 80 | }; |
| 81 | |||
| 82 | static inline u32 | ||
| 83 | nvkm_falcon_rd32(struct nvkm_falcon *falcon, u32 addr) | ||
| 84 | { | ||
| 85 | return nvkm_rd32(falcon->owner->device, falcon->addr + addr); | ||
| 86 | } | ||
| 87 | |||
| 88 | static inline void | ||
| 89 | nvkm_falcon_wr32(struct nvkm_falcon *falcon, u32 addr, u32 data) | ||
| 90 | { | ||
| 91 | nvkm_wr32(falcon->owner->device, falcon->addr + addr, data); | ||
| 92 | } | ||
| 93 | |||
| 94 | static inline u32 | ||
| 95 | nvkm_falcon_mask(struct nvkm_falcon *falcon, u32 addr, u32 mask, u32 val) | ||
| 96 | { | ||
| 97 | struct nvkm_device *device = falcon->owner->device; | ||
| 98 | |||
| 99 | return nvkm_mask(device, falcon->addr + addr, mask, val); | ||
| 100 | } | ||
| 101 | |||
| 102 | void nvkm_falcon_load_imem(struct nvkm_falcon *, void *, u32, u32, u16, u8, | ||
| 103 | bool); | ||
| 104 | void nvkm_falcon_load_dmem(struct nvkm_falcon *, void *, u32, u32, u8); | ||
| 105 | void nvkm_falcon_read_dmem(struct nvkm_falcon *, u32, u32, u8, void *); | ||
| 106 | void nvkm_falcon_bind_context(struct nvkm_falcon *, struct nvkm_gpuobj *); | ||
| 107 | void nvkm_falcon_set_start_addr(struct nvkm_falcon *, u32); | ||
| 108 | void nvkm_falcon_start(struct nvkm_falcon *); | ||
| 109 | int nvkm_falcon_wait_for_halt(struct nvkm_falcon *, u32); | ||
| 110 | int nvkm_falcon_clear_interrupt(struct nvkm_falcon *, u32); | ||
| 111 | int nvkm_falcon_enable(struct nvkm_falcon *); | ||
| 112 | void nvkm_falcon_disable(struct nvkm_falcon *); | ||
| 113 | int nvkm_falcon_reset(struct nvkm_falcon *); | ||
| 114 | |||
| 47 | #endif | 115 | #endif |
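Editor's note: the falcon goes from "an engine" to a shareable device — it records an owning subdev and a name, the mutex/user pair arbitrates access through nvkm_falcon_get()/nvkm_falcon_put(), and the function table plus the free-standing helpers expose IMEM/DMEM loading, context binding, and start/halt control. An acquire-use-release sketch built only from the prototypes above; the constants are placeholders:

    /* Hedged sketch: run a blob on a shared falcon.  load_imem args
     * are (data, start, size, tag, port, secure) per the prototype. */
    int ret = nvkm_falcon_get(falcon, subdev);
    if (ret)
            return ret;

    nvkm_falcon_load_imem(falcon, code, 0, code_size, 0, 0, false);
    nvkm_falcon_set_start_addr(falcon, 0);
    nvkm_falcon_start(falcon);
    ret = nvkm_falcon_wait_for_halt(falcon, 100 /* assumed ms */);

    nvkm_falcon_put(falcon, subdev);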
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h index ed92fec5292c..24efa900d8ca 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h | |||
| @@ -40,6 +40,7 @@ struct nvkm_fifo { | |||
| 40 | 40 | ||
| 41 | struct nvkm_event uevent; /* async user trigger */ | 41 | struct nvkm_event uevent; /* async user trigger */ |
| 42 | struct nvkm_event cevent; /* channel creation event */ | 42 | struct nvkm_event cevent; /* channel creation event */ |
| 43 | struct nvkm_event kevent; /* channel killed */ | ||
| 43 | }; | 44 | }; |
| 44 | 45 | ||
| 45 | void nvkm_fifo_pause(struct nvkm_fifo *, unsigned long *); | 46 | void nvkm_fifo_pause(struct nvkm_fifo *, unsigned long *); |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h new file mode 100644 index 000000000000..f5f4a14c4030 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h | |||
| @@ -0,0 +1,26 @@ | |||
| 1 | #ifndef __NVBIOS_POWER_BUDGET_H__ | ||
| 2 | #define __NVBIOS_POWER_BUDGET_H__ | ||
| 3 | |||
| 4 | #include <nvkm/subdev/bios.h> | ||
| 5 | |||
| 6 | struct nvbios_power_budget_entry { | ||
| 7 | u32 min_w; | ||
| 8 | u32 avg_w; | ||
| 9 | u32 max_w; | ||
| 10 | }; | ||
| 11 | |||
| 12 | struct nvbios_power_budget { | ||
| 13 | u32 offset; | ||
| 14 | u8 ver; | ||
| 15 | u8 hlen; | ||
| 16 | u8 elen; | ||
| 17 | u8 ecount; | ||
| 18 | u8 cap_entry; | ||
| 19 | }; | ||
| 20 | |||
| 21 | int nvbios_power_budget_header(struct nvkm_bios *, | ||
| 22 | struct nvbios_power_budget *); | ||
| 23 | int nvbios_power_budget_entry(struct nvkm_bios *, struct nvbios_power_budget *, | ||
| 24 | u8 idx, struct nvbios_power_budget_entry *); | ||
| 25 | |||
| 26 | #endif | ||
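Editor's note: the new VBIOS parser is self-contained — nvbios_power_budget_header() fills the table geometry and the cap_entry index, and nvbios_power_budget_entry() decodes one min/avg/max-watt row, which the iccsense hunk below stores as power_w_max/power_w_crit. A sketch of the intended call sequence; the 0xff "no capped entry" sentinel and the field interpretation are assumptions:

    /* Sketch: read the capped power budget entry, if the table has
     * one.  Error handling trimmed. */
    struct nvbios_power_budget budget;
    struct nvbios_power_budget_entry entry;

    if (!nvbios_power_budget_header(bios, &budget) &&
        budget.cap_entry != 0xff &&
        !nvbios_power_budget_entry(bios, &budget, budget.cap_entry,
                                   &entry)) {
            max_w  = entry.max_w;   /* hard cap, interpretation assumed */
            crit_w = entry.avg_w;   /* sustained budget, assumed */
    }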
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h index 794e432578b2..0b26a4c860ec 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h | |||
| @@ -29,7 +29,7 @@ struct nvkm_mem { | |||
| 29 | u8 page_shift; | 29 | u8 page_shift; |
| 30 | 30 | ||
| 31 | struct nvkm_mm_node *tag; | 31 | struct nvkm_mm_node *tag; |
| 32 | struct list_head regions; | 32 | struct nvkm_mm_node *mem; |
| 33 | dma_addr_t *pages; | 33 | dma_addr_t *pages; |
| 34 | u32 memtype; | 34 | u32 memtype; |
| 35 | u64 offset; | 35 | u64 offset; |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h index 3c2ddd975273..b7a9b041e130 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h | |||
| @@ -8,6 +8,9 @@ struct nvkm_iccsense { | |||
| 8 | bool data_valid; | 8 | bool data_valid; |
| 9 | struct list_head sensors; | 9 | struct list_head sensors; |
| 10 | struct list_head rails; | 10 | struct list_head rails; |
| 11 | |||
| 12 | u32 power_w_max; | ||
| 13 | u32 power_w_crit; | ||
| 11 | }; | 14 | }; |
| 12 | 15 | ||
| 13 | int gf100_iccsense_new(struct nvkm_device *, int index, struct nvkm_iccsense **); | 16 | int gf100_iccsense_new(struct nvkm_device *, int index, struct nvkm_iccsense **); |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h index 27d25b18d85c..e68ba636741b 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h | |||
| @@ -9,6 +9,7 @@ struct nvkm_mc { | |||
| 9 | 9 | ||
| 10 | void nvkm_mc_enable(struct nvkm_device *, enum nvkm_devidx); | 10 | void nvkm_mc_enable(struct nvkm_device *, enum nvkm_devidx); |
| 11 | void nvkm_mc_disable(struct nvkm_device *, enum nvkm_devidx); | 11 | void nvkm_mc_disable(struct nvkm_device *, enum nvkm_devidx); |
| 12 | bool nvkm_mc_enabled(struct nvkm_device *, enum nvkm_devidx); | ||
| 12 | void nvkm_mc_reset(struct nvkm_device *, enum nvkm_devidx); | 13 | void nvkm_mc_reset(struct nvkm_device *, enum nvkm_devidx); |
| 13 | void nvkm_mc_intr(struct nvkm_device *, bool *handled); | 14 | void nvkm_mc_intr(struct nvkm_device *, bool *handled); |
| 14 | void nvkm_mc_intr_unarm(struct nvkm_device *); | 15 | void nvkm_mc_intr_unarm(struct nvkm_device *); |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h index e6523e2cea9f..ac2a695963c1 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h | |||
| @@ -43,6 +43,7 @@ int nv40_pci_new(struct nvkm_device *, int, struct nvkm_pci **); | |||
| 43 | int nv46_pci_new(struct nvkm_device *, int, struct nvkm_pci **); | 43 | int nv46_pci_new(struct nvkm_device *, int, struct nvkm_pci **); |
| 44 | int nv4c_pci_new(struct nvkm_device *, int, struct nvkm_pci **); | 44 | int nv4c_pci_new(struct nvkm_device *, int, struct nvkm_pci **); |
| 45 | int g84_pci_new(struct nvkm_device *, int, struct nvkm_pci **); | 45 | int g84_pci_new(struct nvkm_device *, int, struct nvkm_pci **); |
| 46 | int g92_pci_new(struct nvkm_device *, int, struct nvkm_pci **); | ||
| 46 | int g94_pci_new(struct nvkm_device *, int, struct nvkm_pci **); | 47 | int g94_pci_new(struct nvkm_device *, int, struct nvkm_pci **); |
| 47 | int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **); | 48 | int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **); |
| 48 | int gf106_pci_new(struct nvkm_device *, int, struct nvkm_pci **); | 49 | int gf106_pci_new(struct nvkm_device *, int, struct nvkm_pci **); |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h index f37538eb1fe5..179b6ed3f595 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h | |||
| @@ -1,10 +1,12 @@ | |||
| 1 | #ifndef __NVKM_PMU_H__ | 1 | #ifndef __NVKM_PMU_H__ |
| 2 | #define __NVKM_PMU_H__ | 2 | #define __NVKM_PMU_H__ |
| 3 | #include <core/subdev.h> | 3 | #include <core/subdev.h> |
| 4 | #include <engine/falcon.h> | ||
| 4 | 5 | ||
| 5 | struct nvkm_pmu { | 6 | struct nvkm_pmu { |
| 6 | const struct nvkm_pmu_func *func; | 7 | const struct nvkm_pmu_func *func; |
| 7 | struct nvkm_subdev subdev; | 8 | struct nvkm_subdev subdev; |
| 9 | struct nvkm_falcon *falcon; | ||
| 8 | 10 | ||
| 9 | struct { | 11 | struct { |
| 10 | u32 base; | 12 | u32 base; |
| @@ -35,6 +37,7 @@ int gk110_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); | |||
| 35 | int gk208_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); | 37 | int gk208_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); |
| 36 | int gk20a_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); | 38 | int gk20a_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); |
| 37 | int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); | 39 | int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); |
| 40 | int gm20b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); | ||
| 38 | int gp100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); | 41 | int gp100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); |
| 39 | int gp102_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); | 42 | int gp102_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); |
| 40 | 43 | ||
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h index b04c38c07761..5dbd8aa4f8c2 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h | |||
| @@ -26,7 +26,7 @@ | |||
| 26 | #include <core/subdev.h> | 26 | #include <core/subdev.h> |
| 27 | 27 | ||
| 28 | enum nvkm_secboot_falcon { | 28 | enum nvkm_secboot_falcon { |
| 29 | NVKM_SECBOOT_FALCON_PMU = 0, | 29 | NVKM_SECBOOT_FALCON_PMU = 0, |
| 30 | NVKM_SECBOOT_FALCON_RESERVED = 1, | 30 | NVKM_SECBOOT_FALCON_RESERVED = 1, |
| 31 | NVKM_SECBOOT_FALCON_FECS = 2, | 31 | NVKM_SECBOOT_FALCON_FECS = 2, |
| 32 | NVKM_SECBOOT_FALCON_GPCCS = 3, | 32 | NVKM_SECBOOT_FALCON_GPCCS = 3, |
| @@ -35,22 +35,23 @@ enum nvkm_secboot_falcon { | |||
| 35 | }; | 35 | }; |
| 36 | 36 | ||
| 37 | /** | 37 | /** |
| 38 | * @base: base IO address of the falcon performing secure boot | 38 | * @wpr_set: whether the WPR region is currently set |
| 39 | * @irq_mask: IRQ mask of the falcon performing secure boot | ||
| 40 | * @enable_mask: enable mask of the falcon performing secure boot | ||
| 41 | */ | 39 | */ |
| 42 | struct nvkm_secboot { | 40 | struct nvkm_secboot { |
| 43 | const struct nvkm_secboot_func *func; | 41 | const struct nvkm_secboot_func *func; |
| 42 | struct nvkm_acr *acr; | ||
| 44 | struct nvkm_subdev subdev; | 43 | struct nvkm_subdev subdev; |
| 44 | struct nvkm_falcon *boot_falcon; | ||
| 45 | 45 | ||
| 46 | enum nvkm_devidx devidx; | 46 | u64 wpr_addr; |
| 47 | u32 base; | 47 | u32 wpr_size; |
| 48 | |||
| 49 | bool wpr_set; | ||
| 48 | }; | 50 | }; |
| 49 | #define nvkm_secboot(p) container_of((p), struct nvkm_secboot, subdev) | 51 | #define nvkm_secboot(p) container_of((p), struct nvkm_secboot, subdev) |
| 50 | 52 | ||
| 51 | bool nvkm_secboot_is_managed(struct nvkm_secboot *, enum nvkm_secboot_falcon); | 53 | bool nvkm_secboot_is_managed(struct nvkm_secboot *, enum nvkm_secboot_falcon); |
| 52 | int nvkm_secboot_reset(struct nvkm_secboot *, u32 falcon); | 54 | int nvkm_secboot_reset(struct nvkm_secboot *, enum nvkm_secboot_falcon); |
| 53 | int nvkm_secboot_start(struct nvkm_secboot *, u32 falcon); | ||
| 54 | 55 | ||
| 55 | int gm200_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **); | 56 | int gm200_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **); |
| 56 | int gm20b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **); | 57 | int gm20b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **); |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h index 82d3e28918fd..6a567fe347b3 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h | |||
| @@ -48,10 +48,8 @@ void nvkm_timer_alarm_cancel(struct nvkm_timer *, struct nvkm_alarm *); | |||
| 48 | } while (_taken = nvkm_timer_read(_tmr) - _time0, _taken < _nsecs); \ | 48 | } while (_taken = nvkm_timer_read(_tmr) - _time0, _taken < _nsecs); \ |
| 49 | \ | 49 | \ |
| 50 | if (_taken >= _nsecs) { \ | 50 | if (_taken >= _nsecs) { \ |
| 51 | if (_warn) { \ | 51 | if (_warn) \ |
| 52 | dev_warn(_device->dev, "timeout at %s:%d/%s()!\n", \ | 52 | dev_WARN(_device->dev, "timeout\n"); \ |
| 53 | __FILE__, __LINE__, __func__); \ | ||
| 54 | } \ | ||
| 55 | _taken = -ETIMEDOUT; \ | 53 | _taken = -ETIMEDOUT; \ |
| 56 | } \ | 54 | } \ |
| 57 | _taken; \ | 55 | _taken; \ |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h index 71ebbfd4484f..d23209b62c25 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h | |||
| @@ -11,6 +11,7 @@ struct nvkm_top { | |||
| 11 | u32 nvkm_top_reset(struct nvkm_device *, enum nvkm_devidx); | 11 | u32 nvkm_top_reset(struct nvkm_device *, enum nvkm_devidx); |
| 12 | u32 nvkm_top_intr(struct nvkm_device *, u32 intr, u64 *subdevs); | 12 | u32 nvkm_top_intr(struct nvkm_device *, u32 intr, u64 *subdevs); |
| 13 | u32 nvkm_top_intr_mask(struct nvkm_device *, enum nvkm_devidx); | 13 | u32 nvkm_top_intr_mask(struct nvkm_device *, enum nvkm_devidx); |
| 14 | int nvkm_top_fault_id(struct nvkm_device *, enum nvkm_devidx); | ||
| 14 | enum nvkm_devidx nvkm_top_fault(struct nvkm_device *, int fault); | 15 | enum nvkm_devidx nvkm_top_fault(struct nvkm_device *, int fault); |
| 15 | enum nvkm_devidx nvkm_top_engine(struct nvkm_device *, int, int *runl, int *engn); | 16 | enum nvkm_devidx nvkm_top_engine(struct nvkm_device *, int, int *runl, int *engn); |
| 16 | 17 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 4df4f6ed4886..f98f800cc011 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c | |||
| @@ -87,7 +87,7 @@ nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret) | |||
| 87 | s32 | 87 | s32 |
| 88 | nouveau_abi16_swclass(struct nouveau_drm *drm) | 88 | nouveau_abi16_swclass(struct nouveau_drm *drm) |
| 89 | { | 89 | { |
| 90 | switch (drm->device.info.family) { | 90 | switch (drm->client.device.info.family) { |
| 91 | case NV_DEVICE_INFO_V0_TNT: | 91 | case NV_DEVICE_INFO_V0_TNT: |
| 92 | return NVIF_CLASS_SW_NV04; | 92 | return NVIF_CLASS_SW_NV04; |
| 93 | case NV_DEVICE_INFO_V0_CELSIUS: | 93 | case NV_DEVICE_INFO_V0_CELSIUS: |
| @@ -175,7 +175,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) | |||
| 175 | { | 175 | { |
| 176 | struct nouveau_cli *cli = nouveau_cli(file_priv); | 176 | struct nouveau_cli *cli = nouveau_cli(file_priv); |
| 177 | struct nouveau_drm *drm = nouveau_drm(dev); | 177 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 178 | struct nvif_device *device = &drm->device; | 178 | struct nvif_device *device = &drm->client.device; |
| 179 | struct nvkm_gr *gr = nvxx_gr(device); | 179 | struct nvkm_gr *gr = nvxx_gr(device); |
| 180 | struct drm_nouveau_getparam *getparam = data; | 180 | struct drm_nouveau_getparam *getparam = data; |
| 181 | 181 | ||
| @@ -321,7 +321,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) | |||
| 321 | } | 321 | } |
| 322 | 322 | ||
| 323 | /* Named memory object area */ | 323 | /* Named memory object area */ |
| 324 | ret = nouveau_gem_new(dev, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART, | 324 | ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART, |
| 325 | 0, 0, &chan->ntfy); | 325 | 0, 0, &chan->ntfy); |
| 326 | if (ret == 0) | 326 | if (ret == 0) |
| 327 | ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT, false); | 327 | ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT, false); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index 8b1ca4add2ed..380f340204e8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c | |||
| @@ -65,7 +65,7 @@ static int | |||
| 65 | nv40_get_intensity(struct backlight_device *bd) | 65 | nv40_get_intensity(struct backlight_device *bd) |
| 66 | { | 66 | { |
| 67 | struct nouveau_drm *drm = bl_get_data(bd); | 67 | struct nouveau_drm *drm = bl_get_data(bd); |
| 68 | struct nvif_object *device = &drm->device.object; | 68 | struct nvif_object *device = &drm->client.device.object; |
| 69 | int val = (nvif_rd32(device, NV40_PMC_BACKLIGHT) & | 69 | int val = (nvif_rd32(device, NV40_PMC_BACKLIGHT) & |
| 70 | NV40_PMC_BACKLIGHT_MASK) >> 16; | 70 | NV40_PMC_BACKLIGHT_MASK) >> 16; |
| 71 | 71 | ||
| @@ -76,7 +76,7 @@ static int | |||
| 76 | nv40_set_intensity(struct backlight_device *bd) | 76 | nv40_set_intensity(struct backlight_device *bd) |
| 77 | { | 77 | { |
| 78 | struct nouveau_drm *drm = bl_get_data(bd); | 78 | struct nouveau_drm *drm = bl_get_data(bd); |
| 79 | struct nvif_object *device = &drm->device.object; | 79 | struct nvif_object *device = &drm->client.device.object; |
| 80 | int val = bd->props.brightness; | 80 | int val = bd->props.brightness; |
| 81 | int reg = nvif_rd32(device, NV40_PMC_BACKLIGHT); | 81 | int reg = nvif_rd32(device, NV40_PMC_BACKLIGHT); |
| 82 | 82 | ||
| @@ -96,7 +96,7 @@ static int | |||
| 96 | nv40_backlight_init(struct drm_connector *connector) | 96 | nv40_backlight_init(struct drm_connector *connector) |
| 97 | { | 97 | { |
| 98 | struct nouveau_drm *drm = nouveau_drm(connector->dev); | 98 | struct nouveau_drm *drm = nouveau_drm(connector->dev); |
| 99 | struct nvif_object *device = &drm->device.object; | 99 | struct nvif_object *device = &drm->client.device.object; |
| 100 | struct backlight_properties props; | 100 | struct backlight_properties props; |
| 101 | struct backlight_device *bd; | 101 | struct backlight_device *bd; |
| 102 | struct backlight_connector bl_connector; | 102 | struct backlight_connector bl_connector; |
| @@ -133,7 +133,7 @@ nv50_get_intensity(struct backlight_device *bd) | |||
| 133 | { | 133 | { |
| 134 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); | 134 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); |
| 135 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); | 135 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); |
| 136 | struct nvif_object *device = &drm->device.object; | 136 | struct nvif_object *device = &drm->client.device.object; |
| 137 | int or = nv_encoder->or; | 137 | int or = nv_encoder->or; |
| 138 | u32 div = 1025; | 138 | u32 div = 1025; |
| 139 | u32 val; | 139 | u32 val; |
| @@ -148,7 +148,7 @@ nv50_set_intensity(struct backlight_device *bd) | |||
| 148 | { | 148 | { |
| 149 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); | 149 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); |
| 150 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); | 150 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); |
| 151 | struct nvif_object *device = &drm->device.object; | 151 | struct nvif_object *device = &drm->client.device.object; |
| 152 | int or = nv_encoder->or; | 152 | int or = nv_encoder->or; |
| 153 | u32 div = 1025; | 153 | u32 div = 1025; |
| 154 | u32 val = (bd->props.brightness * div) / 100; | 154 | u32 val = (bd->props.brightness * div) / 100; |
| @@ -169,7 +169,7 @@ nva3_get_intensity(struct backlight_device *bd) | |||
| 169 | { | 169 | { |
| 170 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); | 170 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); |
| 171 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); | 171 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); |
| 172 | struct nvif_object *device = &drm->device.object; | 172 | struct nvif_object *device = &drm->client.device.object; |
| 173 | int or = nv_encoder->or; | 173 | int or = nv_encoder->or; |
| 174 | u32 div, val; | 174 | u32 div, val; |
| 175 | 175 | ||
| @@ -187,7 +187,7 @@ nva3_set_intensity(struct backlight_device *bd) | |||
| 187 | { | 187 | { |
| 188 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); | 188 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); |
| 189 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); | 189 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); |
| 190 | struct nvif_object *device = &drm->device.object; | 190 | struct nvif_object *device = &drm->client.device.object; |
| 191 | int or = nv_encoder->or; | 191 | int or = nv_encoder->or; |
| 192 | u32 div, val; | 192 | u32 div, val; |
| 193 | 193 | ||
| @@ -213,7 +213,7 @@ static int | |||
| 213 | nv50_backlight_init(struct drm_connector *connector) | 213 | nv50_backlight_init(struct drm_connector *connector) |
| 214 | { | 214 | { |
| 215 | struct nouveau_drm *drm = nouveau_drm(connector->dev); | 215 | struct nouveau_drm *drm = nouveau_drm(connector->dev); |
| 216 | struct nvif_object *device = &drm->device.object; | 216 | struct nvif_object *device = &drm->client.device.object; |
| 217 | struct nouveau_encoder *nv_encoder; | 217 | struct nouveau_encoder *nv_encoder; |
| 218 | struct backlight_properties props; | 218 | struct backlight_properties props; |
| 219 | struct backlight_device *bd; | 219 | struct backlight_device *bd; |
| @@ -231,9 +231,9 @@ nv50_backlight_init(struct drm_connector *connector) | |||
| 231 | if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or))) | 231 | if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or))) |
| 232 | return 0; | 232 | return 0; |
| 233 | 233 | ||
| 234 | if (drm->device.info.chipset <= 0xa0 || | 234 | if (drm->client.device.info.chipset <= 0xa0 || |
| 235 | drm->device.info.chipset == 0xaa || | 235 | drm->client.device.info.chipset == 0xaa || |
| 236 | drm->device.info.chipset == 0xac) | 236 | drm->client.device.info.chipset == 0xac) |
| 237 | ops = &nv50_bl_ops; | 237 | ops = &nv50_bl_ops; |
| 238 | else | 238 | else |
| 239 | ops = &nva3_bl_ops; | 239 | ops = &nva3_bl_ops; |
| @@ -265,7 +265,7 @@ int | |||
| 265 | nouveau_backlight_init(struct drm_device *dev) | 265 | nouveau_backlight_init(struct drm_device *dev) |
| 266 | { | 266 | { |
| 267 | struct nouveau_drm *drm = nouveau_drm(dev); | 267 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 268 | struct nvif_device *device = &drm->device; | 268 | struct nvif_device *device = &drm->client.device; |
| 269 | struct drm_connector *connector; | 269 | struct drm_connector *connector; |
| 270 | 270 | ||
| 271 | if (apple_gmux_present()) { | 271 | if (apple_gmux_present()) { |
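Every hunk in nouveau_backlight.c makes the same mechanical substitution: the nvif_device that used to sit directly in struct nouveau_drm is now reached through the driver's master client, so every register access and chipset/family check switches from drm->device to drm->client.device. A minimal sketch of the updated pattern, using only names that appear in the hunks above:

        /* Sketch: the device handle now hangs off the master client. */
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nvif_object *device = &drm->client.device.object;  /* was &drm->device.object */
        int val = (nvif_rd32(device, NV40_PMC_BACKLIGHT) &
                   NV40_PMC_BACKLIGHT_MASK) >> 16;

        if (drm->client.device.info.chipset <= 0xa0)               /* was drm->device.info... */
                ops = &nv50_bl_ops;                                /* as in nv50_backlight_init() */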
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 23ffe8571a99..9a0772ad495a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
| @@ -215,7 +215,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head | |||
| 215 | */ | 215 | */ |
| 216 | 216 | ||
| 217 | struct nouveau_drm *drm = nouveau_drm(dev); | 217 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 218 | struct nvif_object *device = &drm->device.object; | 218 | struct nvif_object *device = &drm->client.device.object; |
| 219 | struct nvbios *bios = &drm->vbios; | 219 | struct nvbios *bios = &drm->vbios; |
| 220 | uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer]; | 220 | uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer]; |
| 221 | uint32_t sel_clk_binding, sel_clk; | 221 | uint32_t sel_clk_binding, sel_clk; |
| @@ -319,7 +319,7 @@ static int | |||
| 319 | get_fp_strap(struct drm_device *dev, struct nvbios *bios) | 319 | get_fp_strap(struct drm_device *dev, struct nvbios *bios) |
| 320 | { | 320 | { |
| 321 | struct nouveau_drm *drm = nouveau_drm(dev); | 321 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 322 | struct nvif_object *device = &drm->device.object; | 322 | struct nvif_object *device = &drm->client.device.object; |
| 323 | 323 | ||
| 324 | /* | 324 | /* |
| 325 | * The fp strap is normally dictated by the "User Strap" in | 325 | * The fp strap is normally dictated by the "User Strap" in |
| @@ -333,10 +333,10 @@ get_fp_strap(struct drm_device *dev, struct nvbios *bios) | |||
| 333 | if (bios->major_version < 5 && bios->data[0x48] & 0x4) | 333 | if (bios->major_version < 5 && bios->data[0x48] & 0x4) |
| 334 | return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf; | 334 | return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf; |
| 335 | 335 | ||
| 336 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_MAXWELL) | 336 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_MAXWELL) |
| 337 | return nvif_rd32(device, 0x001800) & 0x0000000f; | 337 | return nvif_rd32(device, 0x001800) & 0x0000000f; |
| 338 | else | 338 | else |
| 339 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) | 339 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) |
| 340 | return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf; | 340 | return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf; |
| 341 | else | 341 | else |
| 342 | return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf; | 342 | return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf; |
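get_fp_strap() now distinguishes three register layouts: Maxwell and newer expose the straps at 0x001800, Tesla and newer (below Maxwell) carry the flat-panel strap in bits 27:24 of NV_PEXTDEV_BOOT_0, and older chips use bits 19:16. A worked example with a hypothetical register value:

        /* Hypothetical NV_PEXTDEV_BOOT_0 value 0x0a5f0000:
         *   Tesla and newer (below Maxwell): (0x0a5f0000 >> 24) & 0xf = 0xa
         *   pre-Tesla:                       (0x0a5f0000 >> 16) & 0xf = 0xf
         */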
| @@ -638,7 +638,7 @@ int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, | |||
| 638 | */ | 638 | */ |
| 639 | 639 | ||
| 640 | struct nouveau_drm *drm = nouveau_drm(dev); | 640 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 641 | struct nvif_object *device = &drm->device.object; | 641 | struct nvif_object *device = &drm->client.device.object; |
| 642 | struct nvbios *bios = &drm->vbios; | 642 | struct nvbios *bios = &drm->vbios; |
| 643 | int cv = bios->chip_version; | 643 | int cv = bios->chip_version; |
| 644 | uint16_t clktable = 0, scriptptr; | 644 | uint16_t clktable = 0, scriptptr; |
| @@ -1255,7 +1255,7 @@ olddcb_table(struct drm_device *dev) | |||
| 1255 | struct nouveau_drm *drm = nouveau_drm(dev); | 1255 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 1256 | u8 *dcb = NULL; | 1256 | u8 *dcb = NULL; |
| 1257 | 1257 | ||
| 1258 | if (drm->device.info.family > NV_DEVICE_INFO_V0_TNT) | 1258 | if (drm->client.device.info.family > NV_DEVICE_INFO_V0_TNT) |
| 1259 | dcb = ROMPTR(dev, drm->vbios.data[0x36]); | 1259 | dcb = ROMPTR(dev, drm->vbios.data[0x36]); |
| 1260 | if (!dcb) { | 1260 | if (!dcb) { |
| 1261 | NV_WARN(drm, "No DCB data found in VBIOS\n"); | 1261 | NV_WARN(drm, "No DCB data found in VBIOS\n"); |
| @@ -1918,7 +1918,7 @@ static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bio | |||
| 1918 | */ | 1918 | */ |
| 1919 | 1919 | ||
| 1920 | struct nouveau_drm *drm = nouveau_drm(dev); | 1920 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 1921 | struct nvif_object *device = &drm->device.object; | 1921 | struct nvif_object *device = &drm->client.device.object; |
| 1922 | uint8_t bytes_to_write; | 1922 | uint8_t bytes_to_write; |
| 1923 | uint16_t hwsq_entry_offset; | 1923 | uint16_t hwsq_entry_offset; |
| 1924 | int i; | 1924 | int i; |
| @@ -2012,7 +2012,7 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev) | |||
| 2012 | static bool NVInitVBIOS(struct drm_device *dev) | 2012 | static bool NVInitVBIOS(struct drm_device *dev) |
| 2013 | { | 2013 | { |
| 2014 | struct nouveau_drm *drm = nouveau_drm(dev); | 2014 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 2015 | struct nvkm_bios *bios = nvxx_bios(&drm->device); | 2015 | struct nvkm_bios *bios = nvxx_bios(&drm->client.device); |
| 2016 | struct nvbios *legacy = &drm->vbios; | 2016 | struct nvbios *legacy = &drm->vbios; |
| 2017 | 2017 | ||
| 2018 | memset(legacy, 0, sizeof(struct nvbios)); | 2018 | memset(legacy, 0, sizeof(struct nvbios)); |
| @@ -2064,7 +2064,7 @@ nouveau_bios_posted(struct drm_device *dev) | |||
| 2064 | struct nouveau_drm *drm = nouveau_drm(dev); | 2064 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 2065 | unsigned htotal; | 2065 | unsigned htotal; |
| 2066 | 2066 | ||
| 2067 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) | 2067 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) |
| 2068 | return true; | 2068 | return true; |
| 2069 | 2069 | ||
| 2070 | htotal = NVReadVgaCrtc(dev, 0, 0x06); | 2070 | htotal = NVReadVgaCrtc(dev, 0, 0x06); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 8a528ebe30f3..548f36d33924 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
| @@ -48,7 +48,7 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg, | |||
| 48 | { | 48 | { |
| 49 | struct nouveau_drm *drm = nouveau_drm(dev); | 49 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 50 | int i = reg - drm->tile.reg; | 50 | int i = reg - drm->tile.reg; |
| 51 | struct nvkm_device *device = nvxx_device(&drm->device); | 51 | struct nvkm_device *device = nvxx_device(&drm->client.device); |
| 52 | struct nvkm_fb *fb = device->fb; | 52 | struct nvkm_fb *fb = device->fb; |
| 53 | struct nvkm_fb_tile *tile = &fb->tile.region[i]; | 53 | struct nvkm_fb_tile *tile = &fb->tile.region[i]; |
| 54 | 54 | ||
| @@ -100,7 +100,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr, | |||
| 100 | u32 size, u32 pitch, u32 flags) | 100 | u32 size, u32 pitch, u32 flags) |
| 101 | { | 101 | { |
| 102 | struct nouveau_drm *drm = nouveau_drm(dev); | 102 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 103 | struct nvkm_fb *fb = nvxx_fb(&drm->device); | 103 | struct nvkm_fb *fb = nvxx_fb(&drm->client.device); |
| 104 | struct nouveau_drm_tile *tile, *found = NULL; | 104 | struct nouveau_drm_tile *tile, *found = NULL; |
| 105 | int i; | 105 | int i; |
| 106 | 106 | ||
| @@ -139,60 +139,62 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) | |||
| 139 | kfree(nvbo); | 139 | kfree(nvbo); |
| 140 | } | 140 | } |
| 141 | 141 | ||
| 142 | static inline u64 | ||
| 143 | roundup_64(u64 x, u32 y) | ||
| 144 | { | ||
| 145 | x += y - 1; | ||
| 146 | do_div(x, y); | ||
| 147 | return x * y; | ||
| 148 | } | ||
| 149 | |||
| 142 | static void | 150 | static void |
| 143 | nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, | 151 | nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, |
| 144 | int *align, int *size) | 152 | int *align, u64 *size) |
| 145 | { | 153 | { |
| 146 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); | 154 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
| 147 | struct nvif_device *device = &drm->device; | 155 | struct nvif_device *device = &drm->client.device; |
| 148 | 156 | ||
| 149 | if (device->info.family < NV_DEVICE_INFO_V0_TESLA) { | 157 | if (device->info.family < NV_DEVICE_INFO_V0_TESLA) { |
| 150 | if (nvbo->tile_mode) { | 158 | if (nvbo->tile_mode) { |
| 151 | if (device->info.chipset >= 0x40) { | 159 | if (device->info.chipset >= 0x40) { |
| 152 | *align = 65536; | 160 | *align = 65536; |
| 153 | *size = roundup(*size, 64 * nvbo->tile_mode); | 161 | *size = roundup_64(*size, 64 * nvbo->tile_mode); |
| 154 | 162 | ||
| 155 | } else if (device->info.chipset >= 0x30) { | 163 | } else if (device->info.chipset >= 0x30) { |
| 156 | *align = 32768; | 164 | *align = 32768; |
| 157 | *size = roundup(*size, 64 * nvbo->tile_mode); | 165 | *size = roundup_64(*size, 64 * nvbo->tile_mode); |
| 158 | 166 | ||
| 159 | } else if (device->info.chipset >= 0x20) { | 167 | } else if (device->info.chipset >= 0x20) { |
| 160 | *align = 16384; | 168 | *align = 16384; |
| 161 | *size = roundup(*size, 64 * nvbo->tile_mode); | 169 | *size = roundup_64(*size, 64 * nvbo->tile_mode); |
| 162 | 170 | ||
| 163 | } else if (device->info.chipset >= 0x10) { | 171 | } else if (device->info.chipset >= 0x10) { |
| 164 | *align = 16384; | 172 | *align = 16384; |
| 165 | *size = roundup(*size, 32 * nvbo->tile_mode); | 173 | *size = roundup_64(*size, 32 * nvbo->tile_mode); |
| 166 | } | 174 | } |
| 167 | } | 175 | } |
| 168 | } else { | 176 | } else { |
| 169 | *size = roundup(*size, (1 << nvbo->page_shift)); | 177 | *size = roundup_64(*size, (1 << nvbo->page_shift)); |
| 170 | *align = max((1 << nvbo->page_shift), *align); | 178 | *align = max((1 << nvbo->page_shift), *align); |
| 171 | } | 179 | } |
| 172 | 180 | ||
| 173 | *size = roundup(*size, PAGE_SIZE); | 181 | *size = roundup_64(*size, PAGE_SIZE); |
| 174 | } | 182 | } |
| 175 | 183 | ||
| 176 | int | 184 | int |
| 177 | nouveau_bo_new(struct drm_device *dev, int size, int align, | 185 | nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, |
| 178 | uint32_t flags, uint32_t tile_mode, uint32_t tile_flags, | 186 | uint32_t flags, uint32_t tile_mode, uint32_t tile_flags, |
| 179 | struct sg_table *sg, struct reservation_object *robj, | 187 | struct sg_table *sg, struct reservation_object *robj, |
| 180 | struct nouveau_bo **pnvbo) | 188 | struct nouveau_bo **pnvbo) |
| 181 | { | 189 | { |
| 182 | struct nouveau_drm *drm = nouveau_drm(dev); | 190 | struct nouveau_drm *drm = nouveau_drm(cli->dev); |
| 183 | struct nouveau_bo *nvbo; | 191 | struct nouveau_bo *nvbo; |
| 184 | size_t acc_size; | 192 | size_t acc_size; |
| 185 | int ret; | 193 | int ret; |
| 186 | int type = ttm_bo_type_device; | 194 | int type = ttm_bo_type_device; |
| 187 | int lpg_shift = 12; | ||
| 188 | int max_size; | ||
| 189 | |||
| 190 | if (drm->client.vm) | ||
| 191 | lpg_shift = drm->client.vm->mmu->lpg_shift; | ||
| 192 | max_size = INT_MAX & ~((1 << lpg_shift) - 1); | ||
| 193 | 195 | ||
| 194 | if (size <= 0 || size > max_size) { | 196 | if (!size) { |
| 195 | NV_WARN(drm, "skipped size %x\n", (u32)size); | 197 | NV_WARN(drm, "skipped size %016llx\n", size); |
| 196 | return -EINVAL; | 198 | return -EINVAL; |
| 197 | } | 199 | } |
| 198 | 200 | ||
| @@ -208,8 +210,9 @@ nouveau_bo_new(struct drm_device *dev, int size, int align, | |||
| 208 | nvbo->tile_mode = tile_mode; | 210 | nvbo->tile_mode = tile_mode; |
| 209 | nvbo->tile_flags = tile_flags; | 211 | nvbo->tile_flags = tile_flags; |
| 210 | nvbo->bo.bdev = &drm->ttm.bdev; | 212 | nvbo->bo.bdev = &drm->ttm.bdev; |
| 213 | nvbo->cli = cli; | ||
| 211 | 214 | ||
| 212 | if (!nvxx_device(&drm->device)->func->cpu_coherent) | 215 | if (!nvxx_device(&drm->client.device)->func->cpu_coherent) |
| 213 | nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED; | 216 | nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED; |
| 214 | 217 | ||
| 215 | nvbo->page_shift = 12; | 218 | nvbo->page_shift = 12; |
| @@ -255,10 +258,10 @@ static void | |||
| 255 | set_placement_range(struct nouveau_bo *nvbo, uint32_t type) | 258 | set_placement_range(struct nouveau_bo *nvbo, uint32_t type) |
| 256 | { | 259 | { |
| 257 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); | 260 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
| 258 | u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT; | 261 | u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT; |
| 259 | unsigned i, fpfn, lpfn; | 262 | unsigned i, fpfn, lpfn; |
| 260 | 263 | ||
| 261 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS && | 264 | if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS && |
| 262 | nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && | 265 | nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && |
| 263 | nvbo->bo.mem.num_pages < vram_pages / 4) { | 266 | nvbo->bo.mem.num_pages < vram_pages / 4) { |
| 264 | /* | 267 | /* |
| @@ -316,12 +319,12 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig) | |||
| 316 | if (ret) | 319 | if (ret) |
| 317 | return ret; | 320 | return ret; |
| 318 | 321 | ||
| 319 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA && | 322 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && |
| 320 | memtype == TTM_PL_FLAG_VRAM && contig) { | 323 | memtype == TTM_PL_FLAG_VRAM && contig) { |
| 321 | if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) { | 324 | if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) { |
| 322 | if (bo->mem.mem_type == TTM_PL_VRAM) { | 325 | if (bo->mem.mem_type == TTM_PL_VRAM) { |
| 323 | struct nvkm_mem *mem = bo->mem.mm_node; | 326 | struct nvkm_mem *mem = bo->mem.mm_node; |
| 324 | if (!list_is_singular(&mem->regions)) | 327 | if (!nvkm_mm_contiguous(mem->mem)) |
| 325 | evict = true; | 328 | evict = true; |
| 326 | } | 329 | } |
| 327 | nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG; | 330 | nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG; |
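In nouveau_bo_pin(), the check for whether a contiguous VRAM pin must first evict the buffer changes from inspecting the region list (!list_is_singular(&mem->regions)) to asking the allocator through nvkm_mm_contiguous(). Presumably the predicate simply tests that the allocation was not split into a chain of nodes; a hypothetical shape, which may differ from the real definition in the nvkm mm headers:

        /* Hypothetical definition, for illustration only: an allocation is
         * contiguous when its mm node has no chained continuation. */
        #define nvkm_mm_contiguous(node) (!(node)->next)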
| @@ -443,7 +446,7 @@ void | |||
| 443 | nouveau_bo_sync_for_device(struct nouveau_bo *nvbo) | 446 | nouveau_bo_sync_for_device(struct nouveau_bo *nvbo) |
| 444 | { | 447 | { |
| 445 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); | 448 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
| 446 | struct nvkm_device *device = nvxx_device(&drm->device); | 449 | struct nvkm_device *device = nvxx_device(&drm->client.device); |
| 447 | struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; | 450 | struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; |
| 448 | int i; | 451 | int i; |
| 449 | 452 | ||
| @@ -463,7 +466,7 @@ void | |||
| 463 | nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo) | 466 | nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo) |
| 464 | { | 467 | { |
| 465 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); | 468 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
| 466 | struct nvkm_device *device = nvxx_device(&drm->device); | 469 | struct nvkm_device *device = nvxx_device(&drm->client.device); |
| 467 | struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; | 470 | struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; |
| 468 | int i; | 471 | int i; |
| 469 | 472 | ||
| @@ -579,9 +582,9 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
| 579 | TTM_PL_FLAG_WC; | 582 | TTM_PL_FLAG_WC; |
| 580 | man->default_caching = TTM_PL_FLAG_WC; | 583 | man->default_caching = TTM_PL_FLAG_WC; |
| 581 | 584 | ||
| 582 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { | 585 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { |
| 583 | /* Some BARs do not support being ioremapped WC */ | 586 | /* Some BARs do not support being ioremapped WC */ |
| 584 | if (nvxx_bar(&drm->device)->iomap_uncached) { | 587 | if (nvxx_bar(&drm->client.device)->iomap_uncached) { |
| 585 | man->available_caching = TTM_PL_FLAG_UNCACHED; | 588 | man->available_caching = TTM_PL_FLAG_UNCACHED; |
| 586 | man->default_caching = TTM_PL_FLAG_UNCACHED; | 589 | man->default_caching = TTM_PL_FLAG_UNCACHED; |
| 587 | } | 590 | } |
| @@ -594,7 +597,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
| 594 | } | 597 | } |
| 595 | break; | 598 | break; |
| 596 | case TTM_PL_TT: | 599 | case TTM_PL_TT: |
| 597 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) | 600 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) |
| 598 | man->func = &nouveau_gart_manager; | 601 | man->func = &nouveau_gart_manager; |
| 599 | else | 602 | else |
| 600 | if (!drm->agp.bridge) | 603 | if (!drm->agp.bridge) |
| @@ -654,20 +657,20 @@ nve0_bo_move_init(struct nouveau_channel *chan, u32 handle) | |||
| 654 | 657 | ||
| 655 | static int | 658 | static int |
| 656 | nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, | 659 | nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
| 657 | struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) | 660 | struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) |
| 658 | { | 661 | { |
| 659 | struct nvkm_mem *node = old_mem->mm_node; | 662 | struct nvkm_mem *mem = old_reg->mm_node; |
| 660 | int ret = RING_SPACE(chan, 10); | 663 | int ret = RING_SPACE(chan, 10); |
| 661 | if (ret == 0) { | 664 | if (ret == 0) { |
| 662 | BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8); | 665 | BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8); |
| 663 | OUT_RING (chan, upper_32_bits(node->vma[0].offset)); | 666 | OUT_RING (chan, upper_32_bits(mem->vma[0].offset)); |
| 664 | OUT_RING (chan, lower_32_bits(node->vma[0].offset)); | 667 | OUT_RING (chan, lower_32_bits(mem->vma[0].offset)); |
| 665 | OUT_RING (chan, upper_32_bits(node->vma[1].offset)); | 668 | OUT_RING (chan, upper_32_bits(mem->vma[1].offset)); |
| 666 | OUT_RING (chan, lower_32_bits(node->vma[1].offset)); | 669 | OUT_RING (chan, lower_32_bits(mem->vma[1].offset)); |
| 667 | OUT_RING (chan, PAGE_SIZE); | 670 | OUT_RING (chan, PAGE_SIZE); |
| 668 | OUT_RING (chan, PAGE_SIZE); | 671 | OUT_RING (chan, PAGE_SIZE); |
| 669 | OUT_RING (chan, PAGE_SIZE); | 672 | OUT_RING (chan, PAGE_SIZE); |
| 670 | OUT_RING (chan, new_mem->num_pages); | 673 | OUT_RING (chan, new_reg->num_pages); |
| 671 | BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386); | 674 | BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386); |
| 672 | } | 675 | } |
| 673 | return ret; | 676 | return ret; |
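From nve0_bo_move_copy() onward the move helpers are renamed consistently: struct ttm_mem_reg * parameters become old_reg/new_reg, and the struct nvkm_mem * fished out of reg->mm_node becomes mem (previously node). After the rename, "reg" always means TTM's placement record and "mem" always means nouveau's own allocation behind it. A sketch of the relationship, using only fields visible in these hunks (the helper name is mine):

        static struct nvkm_mem *bo_reg_to_nvkm_mem(struct ttm_mem_reg *reg)
        {
                return reg->mm_node;  /* nouveau allocation: vma[], memtype, page_shift */
        }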
| @@ -686,15 +689,15 @@ nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle) | |||
| 686 | 689 | ||
| 687 | static int | 690 | static int |
| 688 | nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, | 691 | nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
| 689 | struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) | 692 | struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) |
| 690 | { | 693 | { |
| 691 | struct nvkm_mem *node = old_mem->mm_node; | 694 | struct nvkm_mem *mem = old_reg->mm_node; |
| 692 | u64 src_offset = node->vma[0].offset; | 695 | u64 src_offset = mem->vma[0].offset; |
| 693 | u64 dst_offset = node->vma[1].offset; | 696 | u64 dst_offset = mem->vma[1].offset; |
| 694 | u32 page_count = new_mem->num_pages; | 697 | u32 page_count = new_reg->num_pages; |
| 695 | int ret; | 698 | int ret; |
| 696 | 699 | ||
| 697 | page_count = new_mem->num_pages; | 700 | page_count = new_reg->num_pages; |
| 698 | while (page_count) { | 701 | while (page_count) { |
| 699 | int line_count = (page_count > 8191) ? 8191 : page_count; | 702 | int line_count = (page_count > 8191) ? 8191 : page_count; |
| 700 | 703 | ||
| @@ -724,15 +727,15 @@ nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, | |||
| 724 | 727 | ||
| 725 | static int | 728 | static int |
| 726 | nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, | 729 | nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
| 727 | struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) | 730 | struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) |
| 728 | { | 731 | { |
| 729 | struct nvkm_mem *node = old_mem->mm_node; | 732 | struct nvkm_mem *mem = old_reg->mm_node; |
| 730 | u64 src_offset = node->vma[0].offset; | 733 | u64 src_offset = mem->vma[0].offset; |
| 731 | u64 dst_offset = node->vma[1].offset; | 734 | u64 dst_offset = mem->vma[1].offset; |
| 732 | u32 page_count = new_mem->num_pages; | 735 | u32 page_count = new_reg->num_pages; |
| 733 | int ret; | 736 | int ret; |
| 734 | 737 | ||
| 735 | page_count = new_mem->num_pages; | 738 | page_count = new_reg->num_pages; |
| 736 | while (page_count) { | 739 | while (page_count) { |
| 737 | int line_count = (page_count > 2047) ? 2047 : page_count; | 740 | int line_count = (page_count > 2047) ? 2047 : page_count; |
| 738 | 741 | ||
| @@ -763,15 +766,15 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, | |||
| 763 | 766 | ||
| 764 | static int | 767 | static int |
| 765 | nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, | 768 | nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
| 766 | struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) | 769 | struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) |
| 767 | { | 770 | { |
| 768 | struct nvkm_mem *node = old_mem->mm_node; | 771 | struct nvkm_mem *mem = old_reg->mm_node; |
| 769 | u64 src_offset = node->vma[0].offset; | 772 | u64 src_offset = mem->vma[0].offset; |
| 770 | u64 dst_offset = node->vma[1].offset; | 773 | u64 dst_offset = mem->vma[1].offset; |
| 771 | u32 page_count = new_mem->num_pages; | 774 | u32 page_count = new_reg->num_pages; |
| 772 | int ret; | 775 | int ret; |
| 773 | 776 | ||
| 774 | page_count = new_mem->num_pages; | 777 | page_count = new_reg->num_pages; |
| 775 | while (page_count) { | 778 | while (page_count) { |
| 776 | int line_count = (page_count > 8191) ? 8191 : page_count; | 779 | int line_count = (page_count > 8191) ? 8191 : page_count; |
| 777 | 780 | ||
| @@ -801,35 +804,35 @@ nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, | |||
| 801 | 804 | ||
| 802 | static int | 805 | static int |
| 803 | nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, | 806 | nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
| 804 | struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) | 807 | struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) |
| 805 | { | 808 | { |
| 806 | struct nvkm_mem *node = old_mem->mm_node; | 809 | struct nvkm_mem *mem = old_reg->mm_node; |
| 807 | int ret = RING_SPACE(chan, 7); | 810 | int ret = RING_SPACE(chan, 7); |
| 808 | if (ret == 0) { | 811 | if (ret == 0) { |
| 809 | BEGIN_NV04(chan, NvSubCopy, 0x0320, 6); | 812 | BEGIN_NV04(chan, NvSubCopy, 0x0320, 6); |
| 810 | OUT_RING (chan, upper_32_bits(node->vma[0].offset)); | 813 | OUT_RING (chan, upper_32_bits(mem->vma[0].offset)); |
| 811 | OUT_RING (chan, lower_32_bits(node->vma[0].offset)); | 814 | OUT_RING (chan, lower_32_bits(mem->vma[0].offset)); |
| 812 | OUT_RING (chan, upper_32_bits(node->vma[1].offset)); | 815 | OUT_RING (chan, upper_32_bits(mem->vma[1].offset)); |
| 813 | OUT_RING (chan, lower_32_bits(node->vma[1].offset)); | 816 | OUT_RING (chan, lower_32_bits(mem->vma[1].offset)); |
| 814 | OUT_RING (chan, 0x00000000 /* COPY */); | 817 | OUT_RING (chan, 0x00000000 /* COPY */); |
| 815 | OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT); | 818 | OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT); |
| 816 | } | 819 | } |
| 817 | return ret; | 820 | return ret; |
| 818 | } | 821 | } |
| 819 | 822 | ||
| 820 | static int | 823 | static int |
| 821 | nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, | 824 | nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
| 822 | struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) | 825 | struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) |
| 823 | { | 826 | { |
| 824 | struct nvkm_mem *node = old_mem->mm_node; | 827 | struct nvkm_mem *mem = old_reg->mm_node; |
| 825 | int ret = RING_SPACE(chan, 7); | 828 | int ret = RING_SPACE(chan, 7); |
| 826 | if (ret == 0) { | 829 | if (ret == 0) { |
| 827 | BEGIN_NV04(chan, NvSubCopy, 0x0304, 6); | 830 | BEGIN_NV04(chan, NvSubCopy, 0x0304, 6); |
| 828 | OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT); | 831 | OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT); |
| 829 | OUT_RING (chan, upper_32_bits(node->vma[0].offset)); | 832 | OUT_RING (chan, upper_32_bits(mem->vma[0].offset)); |
| 830 | OUT_RING (chan, lower_32_bits(node->vma[0].offset)); | 833 | OUT_RING (chan, lower_32_bits(mem->vma[0].offset)); |
| 831 | OUT_RING (chan, upper_32_bits(node->vma[1].offset)); | 834 | OUT_RING (chan, upper_32_bits(mem->vma[1].offset)); |
| 832 | OUT_RING (chan, lower_32_bits(node->vma[1].offset)); | 835 | OUT_RING (chan, lower_32_bits(mem->vma[1].offset)); |
| 833 | OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */); | 836 | OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */); |
| 834 | } | 837 | } |
| 835 | return ret; | 838 | return ret; |
| @@ -853,14 +856,14 @@ nv50_bo_move_init(struct nouveau_channel *chan, u32 handle) | |||
| 853 | 856 | ||
| 854 | static int | 857 | static int |
| 855 | nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, | 858 | nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
| 856 | struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) | 859 | struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) |
| 857 | { | 860 | { |
| 858 | struct nvkm_mem *node = old_mem->mm_node; | 861 | struct nvkm_mem *mem = old_reg->mm_node; |
| 859 | u64 length = (new_mem->num_pages << PAGE_SHIFT); | 862 | u64 length = (new_reg->num_pages << PAGE_SHIFT); |
| 860 | u64 src_offset = node->vma[0].offset; | 863 | u64 src_offset = mem->vma[0].offset; |
| 861 | u64 dst_offset = node->vma[1].offset; | 864 | u64 dst_offset = mem->vma[1].offset; |
| 862 | int src_tiled = !!node->memtype; | 865 | int src_tiled = !!mem->memtype; |
| 863 | int dst_tiled = !!((struct nvkm_mem *)new_mem->mm_node)->memtype; | 866 | int dst_tiled = !!((struct nvkm_mem *)new_reg->mm_node)->memtype; |
| 864 | int ret; | 867 | int ret; |
| 865 | 868 | ||
| 866 | while (length) { | 869 | while (length) { |
| @@ -940,20 +943,20 @@ nv04_bo_move_init(struct nouveau_channel *chan, u32 handle) | |||
| 940 | 943 | ||
| 941 | static inline uint32_t | 944 | static inline uint32_t |
| 942 | nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo, | 945 | nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo, |
| 943 | struct nouveau_channel *chan, struct ttm_mem_reg *mem) | 946 | struct nouveau_channel *chan, struct ttm_mem_reg *reg) |
| 944 | { | 947 | { |
| 945 | if (mem->mem_type == TTM_PL_TT) | 948 | if (reg->mem_type == TTM_PL_TT) |
| 946 | return NvDmaTT; | 949 | return NvDmaTT; |
| 947 | return chan->vram.handle; | 950 | return chan->vram.handle; |
| 948 | } | 951 | } |
| 949 | 952 | ||
| 950 | static int | 953 | static int |
| 951 | nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, | 954 | nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
| 952 | struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) | 955 | struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) |
| 953 | { | 956 | { |
| 954 | u32 src_offset = old_mem->start << PAGE_SHIFT; | 957 | u32 src_offset = old_reg->start << PAGE_SHIFT; |
| 955 | u32 dst_offset = new_mem->start << PAGE_SHIFT; | 958 | u32 dst_offset = new_reg->start << PAGE_SHIFT; |
| 956 | u32 page_count = new_mem->num_pages; | 959 | u32 page_count = new_reg->num_pages; |
| 957 | int ret; | 960 | int ret; |
| 958 | 961 | ||
| 959 | ret = RING_SPACE(chan, 3); | 962 | ret = RING_SPACE(chan, 3); |
| @@ -961,10 +964,10 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, | |||
| 961 | return ret; | 964 | return ret; |
| 962 | 965 | ||
| 963 | BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2); | 966 | BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2); |
| 964 | OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem)); | 967 | OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_reg)); |
| 965 | OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem)); | 968 | OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_reg)); |
| 966 | 969 | ||
| 967 | page_count = new_mem->num_pages; | 970 | page_count = new_reg->num_pages; |
| 968 | while (page_count) { | 971 | while (page_count) { |
| 969 | int line_count = (page_count > 2047) ? 2047 : page_count; | 972 | int line_count = (page_count > 2047) ? 2047 : page_count; |
| 970 | 973 | ||
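All of the engine copy paths above share one chunking pattern: a single submission moves at most a fixed number of pages (2047 on the M2MF paths, 8191 on the dedicated copy engines), so larger moves loop until the page count is drained. A sketch of the shared loop shape; the decrement is implied, since the hunks cut off before the loop bodies:

        u32 page_count = new_reg->num_pages;

        while (page_count) {
                /* 2047 on NV04/NVC0 M2MF, 8191 on NVA3/NVC0 copy engines */
                int line_count = (page_count > 2047) ? 2047 : page_count;

                /* ... emit one copy submission for line_count pages ... */

                page_count -= line_count;
        }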
| @@ -995,33 +998,33 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, | |||
| 995 | 998 | ||
| 996 | static int | 999 | static int |
| 997 | nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo, | 1000 | nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo, |
| 998 | struct ttm_mem_reg *mem) | 1001 | struct ttm_mem_reg *reg) |
| 999 | { | 1002 | { |
| 1000 | struct nvkm_mem *old_node = bo->mem.mm_node; | 1003 | struct nvkm_mem *old_mem = bo->mem.mm_node; |
| 1001 | struct nvkm_mem *new_node = mem->mm_node; | 1004 | struct nvkm_mem *new_mem = reg->mm_node; |
| 1002 | u64 size = (u64)mem->num_pages << PAGE_SHIFT; | 1005 | u64 size = (u64)reg->num_pages << PAGE_SHIFT; |
| 1003 | int ret; | 1006 | int ret; |
| 1004 | 1007 | ||
| 1005 | ret = nvkm_vm_get(drm->client.vm, size, old_node->page_shift, | 1008 | ret = nvkm_vm_get(drm->client.vm, size, old_mem->page_shift, |
| 1006 | NV_MEM_ACCESS_RW, &old_node->vma[0]); | 1009 | NV_MEM_ACCESS_RW, &old_mem->vma[0]); |
| 1007 | if (ret) | 1010 | if (ret) |
| 1008 | return ret; | 1011 | return ret; |
| 1009 | 1012 | ||
| 1010 | ret = nvkm_vm_get(drm->client.vm, size, new_node->page_shift, | 1013 | ret = nvkm_vm_get(drm->client.vm, size, new_mem->page_shift, |
| 1011 | NV_MEM_ACCESS_RW, &old_node->vma[1]); | 1014 | NV_MEM_ACCESS_RW, &old_mem->vma[1]); |
| 1012 | if (ret) { | 1015 | if (ret) { |
| 1013 | nvkm_vm_put(&old_node->vma[0]); | 1016 | nvkm_vm_put(&old_mem->vma[0]); |
| 1014 | return ret; | 1017 | return ret; |
| 1015 | } | 1018 | } |
| 1016 | 1019 | ||
| 1017 | nvkm_vm_map(&old_node->vma[0], old_node); | 1020 | nvkm_vm_map(&old_mem->vma[0], old_mem); |
| 1018 | nvkm_vm_map(&old_node->vma[1], new_node); | 1021 | nvkm_vm_map(&old_mem->vma[1], new_mem); |
| 1019 | return 0; | 1022 | return 0; |
| 1020 | } | 1023 | } |
| 1021 | 1024 | ||
| 1022 | static int | 1025 | static int |
| 1023 | nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, | 1026 | nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, |
| 1024 | bool no_wait_gpu, struct ttm_mem_reg *new_mem) | 1027 | bool no_wait_gpu, struct ttm_mem_reg *new_reg) |
| 1025 | { | 1028 | { |
| 1026 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); | 1029 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
| 1027 | struct nouveau_channel *chan = drm->ttm.chan; | 1030 | struct nouveau_channel *chan = drm->ttm.chan; |
| @@ -1033,8 +1036,8 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, | |||
| 1033 | * old nvkm_mem node, these will get cleaned up after ttm has | 1036 | * old nvkm_mem node, these will get cleaned up after ttm has |
| 1034 | * destroyed the ttm_mem_reg | 1037 | * destroyed the ttm_mem_reg |
| 1035 | */ | 1038 | */ |
| 1036 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { | 1039 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { |
| 1037 | ret = nouveau_bo_move_prep(drm, bo, new_mem); | 1040 | ret = nouveau_bo_move_prep(drm, bo, new_reg); |
| 1038 | if (ret) | 1041 | if (ret) |
| 1039 | return ret; | 1042 | return ret; |
| 1040 | } | 1043 | } |
| @@ -1042,14 +1045,14 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, | |||
| 1042 | mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING); | 1045 | mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING); |
| 1043 | ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr); | 1046 | ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr); |
| 1044 | if (ret == 0) { | 1047 | if (ret == 0) { |
| 1045 | ret = drm->ttm.move(chan, bo, &bo->mem, new_mem); | 1048 | ret = drm->ttm.move(chan, bo, &bo->mem, new_reg); |
| 1046 | if (ret == 0) { | 1049 | if (ret == 0) { |
| 1047 | ret = nouveau_fence_new(chan, false, &fence); | 1050 | ret = nouveau_fence_new(chan, false, &fence); |
| 1048 | if (ret == 0) { | 1051 | if (ret == 0) { |
| 1049 | ret = ttm_bo_move_accel_cleanup(bo, | 1052 | ret = ttm_bo_move_accel_cleanup(bo, |
| 1050 | &fence->base, | 1053 | &fence->base, |
| 1051 | evict, | 1054 | evict, |
| 1052 | new_mem); | 1055 | new_reg); |
| 1053 | nouveau_fence_unref(&fence); | 1056 | nouveau_fence_unref(&fence); |
| 1054 | } | 1057 | } |
| 1055 | } | 1058 | } |
| @@ -1124,7 +1127,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm) | |||
| 1124 | 1127 | ||
| 1125 | static int | 1128 | static int |
| 1126 | nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, | 1129 | nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, |
| 1127 | bool no_wait_gpu, struct ttm_mem_reg *new_mem) | 1130 | bool no_wait_gpu, struct ttm_mem_reg *new_reg) |
| 1128 | { | 1131 | { |
| 1129 | struct ttm_place placement_memtype = { | 1132 | struct ttm_place placement_memtype = { |
| 1130 | .fpfn = 0, | 1133 | .fpfn = 0, |
| @@ -1132,35 +1135,35 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, | |||
| 1132 | .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING | 1135 | .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING |
| 1133 | }; | 1136 | }; |
| 1134 | struct ttm_placement placement; | 1137 | struct ttm_placement placement; |
| 1135 | struct ttm_mem_reg tmp_mem; | 1138 | struct ttm_mem_reg tmp_reg; |
| 1136 | int ret; | 1139 | int ret; |
| 1137 | 1140 | ||
| 1138 | placement.num_placement = placement.num_busy_placement = 1; | 1141 | placement.num_placement = placement.num_busy_placement = 1; |
| 1139 | placement.placement = placement.busy_placement = &placement_memtype; | 1142 | placement.placement = placement.busy_placement = &placement_memtype; |
| 1140 | 1143 | ||
| 1141 | tmp_mem = *new_mem; | 1144 | tmp_reg = *new_reg; |
| 1142 | tmp_mem.mm_node = NULL; | 1145 | tmp_reg.mm_node = NULL; |
| 1143 | ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu); | 1146 | ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu); |
| 1144 | if (ret) | 1147 | if (ret) |
| 1145 | return ret; | 1148 | return ret; |
| 1146 | 1149 | ||
| 1147 | ret = ttm_tt_bind(bo->ttm, &tmp_mem); | 1150 | ret = ttm_tt_bind(bo->ttm, &tmp_reg); |
| 1148 | if (ret) | 1151 | if (ret) |
| 1149 | goto out; | 1152 | goto out; |
| 1150 | 1153 | ||
| 1151 | ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem); | 1154 | ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_reg); |
| 1152 | if (ret) | 1155 | if (ret) |
| 1153 | goto out; | 1156 | goto out; |
| 1154 | 1157 | ||
| 1155 | ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_mem); | 1158 | ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_reg); |
| 1156 | out: | 1159 | out: |
| 1157 | ttm_bo_mem_put(bo, &tmp_mem); | 1160 | ttm_bo_mem_put(bo, &tmp_reg); |
| 1158 | return ret; | 1161 | return ret; |
| 1159 | } | 1162 | } |
| 1160 | 1163 | ||
| 1161 | static int | 1164 | static int |
| 1162 | nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, | 1165 | nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, |
| 1163 | bool no_wait_gpu, struct ttm_mem_reg *new_mem) | 1166 | bool no_wait_gpu, struct ttm_mem_reg *new_reg) |
| 1164 | { | 1167 | { |
| 1165 | struct ttm_place placement_memtype = { | 1168 | struct ttm_place placement_memtype = { |
| 1166 | .fpfn = 0, | 1169 | .fpfn = 0, |
| @@ -1168,34 +1171,34 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, | |||
| 1168 | .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING | 1171 | .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING |
| 1169 | }; | 1172 | }; |
| 1170 | struct ttm_placement placement; | 1173 | struct ttm_placement placement; |
| 1171 | struct ttm_mem_reg tmp_mem; | 1174 | struct ttm_mem_reg tmp_reg; |
| 1172 | int ret; | 1175 | int ret; |
| 1173 | 1176 | ||
| 1174 | placement.num_placement = placement.num_busy_placement = 1; | 1177 | placement.num_placement = placement.num_busy_placement = 1; |
| 1175 | placement.placement = placement.busy_placement = &placement_memtype; | 1178 | placement.placement = placement.busy_placement = &placement_memtype; |
| 1176 | 1179 | ||
| 1177 | tmp_mem = *new_mem; | 1180 | tmp_reg = *new_reg; |
| 1178 | tmp_mem.mm_node = NULL; | 1181 | tmp_reg.mm_node = NULL; |
| 1179 | ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu); | 1182 | ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu); |
| 1180 | if (ret) | 1183 | if (ret) |
| 1181 | return ret; | 1184 | return ret; |
| 1182 | 1185 | ||
| 1183 | ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_mem); | 1186 | ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_reg); |
| 1184 | if (ret) | 1187 | if (ret) |
| 1185 | goto out; | 1188 | goto out; |
| 1186 | 1189 | ||
| 1187 | ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem); | 1190 | ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_reg); |
| 1188 | if (ret) | 1191 | if (ret) |
| 1189 | goto out; | 1192 | goto out; |
| 1190 | 1193 | ||
| 1191 | out: | 1194 | out: |
| 1192 | ttm_bo_mem_put(bo, &tmp_mem); | 1195 | ttm_bo_mem_put(bo, &tmp_reg); |
| 1193 | return ret; | 1196 | return ret; |
| 1194 | } | 1197 | } |
| 1195 | 1198 | ||
| 1196 | static void | 1199 | static void |
| 1197 | nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict, | 1200 | nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict, |
| 1198 | struct ttm_mem_reg *new_mem) | 1201 | struct ttm_mem_reg *new_reg) |
| 1199 | { | 1202 | { |
| 1200 | struct nouveau_bo *nvbo = nouveau_bo(bo); | 1203 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
| 1201 | struct nvkm_vma *vma; | 1204 | struct nvkm_vma *vma; |
| @@ -1205,10 +1208,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict, | |||
| 1205 | return; | 1208 | return; |
| 1206 | 1209 | ||
| 1207 | list_for_each_entry(vma, &nvbo->vma_list, head) { | 1210 | list_for_each_entry(vma, &nvbo->vma_list, head) { |
| 1208 | if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM && | 1211 | if (new_reg && new_reg->mem_type != TTM_PL_SYSTEM && |
| 1209 | (new_mem->mem_type == TTM_PL_VRAM || | 1212 | (new_reg->mem_type == TTM_PL_VRAM || |
| 1210 | nvbo->page_shift != vma->vm->mmu->lpg_shift)) { | 1213 | nvbo->page_shift != vma->vm->mmu->lpg_shift)) { |
| 1211 | nvkm_vm_map(vma, new_mem->mm_node); | 1214 | nvkm_vm_map(vma, new_reg->mm_node); |
| 1212 | } else { | 1215 | } else { |
| 1213 | WARN_ON(ttm_bo_wait(bo, false, false)); | 1216 | WARN_ON(ttm_bo_wait(bo, false, false)); |
| 1214 | nvkm_vm_unmap(vma); | 1217 | nvkm_vm_unmap(vma); |
| @@ -1217,20 +1220,20 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict, | |||
| 1217 | } | 1220 | } |
| 1218 | 1221 | ||
| 1219 | static int | 1222 | static int |
| 1220 | nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, | 1223 | nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg, |
| 1221 | struct nouveau_drm_tile **new_tile) | 1224 | struct nouveau_drm_tile **new_tile) |
| 1222 | { | 1225 | { |
| 1223 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); | 1226 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
| 1224 | struct drm_device *dev = drm->dev; | 1227 | struct drm_device *dev = drm->dev; |
| 1225 | struct nouveau_bo *nvbo = nouveau_bo(bo); | 1228 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
| 1226 | u64 offset = new_mem->start << PAGE_SHIFT; | 1229 | u64 offset = new_reg->start << PAGE_SHIFT; |
| 1227 | 1230 | ||
| 1228 | *new_tile = NULL; | 1231 | *new_tile = NULL; |
| 1229 | if (new_mem->mem_type != TTM_PL_VRAM) | 1232 | if (new_reg->mem_type != TTM_PL_VRAM) |
| 1230 | return 0; | 1233 | return 0; |
| 1231 | 1234 | ||
| 1232 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { | 1235 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { |
| 1233 | *new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size, | 1236 | *new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size, |
| 1234 | nvbo->tile_mode, | 1237 | nvbo->tile_mode, |
| 1235 | nvbo->tile_flags); | 1238 | nvbo->tile_flags); |
| 1236 | } | 1239 | } |
| @@ -1253,11 +1256,11 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, | |||
| 1253 | 1256 | ||
| 1254 | static int | 1257 | static int |
| 1255 | nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, | 1258 | nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, |
| 1256 | bool no_wait_gpu, struct ttm_mem_reg *new_mem) | 1259 | bool no_wait_gpu, struct ttm_mem_reg *new_reg) |
| 1257 | { | 1260 | { |
| 1258 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); | 1261 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
| 1259 | struct nouveau_bo *nvbo = nouveau_bo(bo); | 1262 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
| 1260 | struct ttm_mem_reg *old_mem = &bo->mem; | 1263 | struct ttm_mem_reg *old_reg = &bo->mem; |
| 1261 | struct nouveau_drm_tile *new_tile = NULL; | 1264 | struct nouveau_drm_tile *new_tile = NULL; |
| 1262 | int ret = 0; | 1265 | int ret = 0; |
| 1263 | 1266 | ||
| @@ -1268,31 +1271,31 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, | |||
| 1268 | if (nvbo->pin_refcnt) | 1271 | if (nvbo->pin_refcnt) |
| 1269 | NV_WARN(drm, "Moving pinned object %p!\n", nvbo); | 1272 | NV_WARN(drm, "Moving pinned object %p!\n", nvbo); |
| 1270 | 1273 | ||
| 1271 | if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { | 1274 | if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { |
| 1272 | ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile); | 1275 | ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile); |
| 1273 | if (ret) | 1276 | if (ret) |
| 1274 | return ret; | 1277 | return ret; |
| 1275 | } | 1278 | } |
| 1276 | 1279 | ||
| 1277 | /* Fake bo copy. */ | 1280 | /* Fake bo copy. */ |
| 1278 | if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) { | 1281 | if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) { |
| 1279 | BUG_ON(bo->mem.mm_node != NULL); | 1282 | BUG_ON(bo->mem.mm_node != NULL); |
| 1280 | bo->mem = *new_mem; | 1283 | bo->mem = *new_reg; |
| 1281 | new_mem->mm_node = NULL; | 1284 | new_reg->mm_node = NULL; |
| 1282 | goto out; | 1285 | goto out; |
| 1283 | } | 1286 | } |
| 1284 | 1287 | ||
| 1285 | /* Hardware assisted copy. */ | 1288 | /* Hardware assisted copy. */ |
| 1286 | if (drm->ttm.move) { | 1289 | if (drm->ttm.move) { |
| 1287 | if (new_mem->mem_type == TTM_PL_SYSTEM) | 1290 | if (new_reg->mem_type == TTM_PL_SYSTEM) |
| 1288 | ret = nouveau_bo_move_flipd(bo, evict, intr, | 1291 | ret = nouveau_bo_move_flipd(bo, evict, intr, |
| 1289 | no_wait_gpu, new_mem); | 1292 | no_wait_gpu, new_reg); |
| 1290 | else if (old_mem->mem_type == TTM_PL_SYSTEM) | 1293 | else if (old_reg->mem_type == TTM_PL_SYSTEM) |
| 1291 | ret = nouveau_bo_move_flips(bo, evict, intr, | 1294 | ret = nouveau_bo_move_flips(bo, evict, intr, |
| 1292 | no_wait_gpu, new_mem); | 1295 | no_wait_gpu, new_reg); |
| 1293 | else | 1296 | else |
| 1294 | ret = nouveau_bo_move_m2mf(bo, evict, intr, | 1297 | ret = nouveau_bo_move_m2mf(bo, evict, intr, |
| 1295 | no_wait_gpu, new_mem); | 1298 | no_wait_gpu, new_reg); |
| 1296 | if (!ret) | 1299 | if (!ret) |
| 1297 | goto out; | 1300 | goto out; |
| 1298 | } | 1301 | } |
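The routing in nouveau_bo_move() is untouched in structure, only renamed: when either end of a move is TTM_PL_SYSTEM the buffer is staged through a GART-bound TT placement first (move_flipd when the destination is system memory, move_flips when the source is), presumably because the copy engines can only reach memory mapped through the GPU's VM; everything else goes straight to the m2mf/copy path, and a failed or unavailable engine falls through to the CPU copy in the next hunk.

        /* Move routing, as a sketch:
         *   new_reg == SYSTEM  -> nouveau_bo_move_flipd()  (bounce via TT)
         *   old_reg == SYSTEM  -> nouveau_bo_move_flips()  (bounce via TT)
         *   otherwise          -> nouveau_bo_move_m2mf()   (direct engine copy)
         *   no engine / error  -> ttm_bo_wait() + ttm_bo_move_memcpy()
         */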
| @@ -1300,10 +1303,10 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, | |||
| 1300 | /* Fallback to software copy. */ | 1303 | /* Fallback to software copy. */ |
| 1301 | ret = ttm_bo_wait(bo, intr, no_wait_gpu); | 1304 | ret = ttm_bo_wait(bo, intr, no_wait_gpu); |
| 1302 | if (ret == 0) | 1305 | if (ret == 0) |
| 1303 | ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_mem); | 1306 | ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_reg); |
| 1304 | 1307 | ||
| 1305 | out: | 1308 | out: |
| 1306 | if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { | 1309 | if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { |
| 1307 | if (ret) | 1310 | if (ret) |
| 1308 | nouveau_bo_vm_cleanup(bo, NULL, &new_tile); | 1311 | nouveau_bo_vm_cleanup(bo, NULL, &new_tile); |
| 1309 | else | 1312 | else |
| @@ -1323,54 +1326,54 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) | |||
| 1323 | } | 1326 | } |
| 1324 | 1327 | ||
| 1325 | static int | 1328 | static int |
| 1326 | nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | 1329 | nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg) |
| 1327 | { | 1330 | { |
| 1328 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; | 1331 | struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type]; |
| 1329 | struct nouveau_drm *drm = nouveau_bdev(bdev); | 1332 | struct nouveau_drm *drm = nouveau_bdev(bdev); |
| 1330 | struct nvkm_device *device = nvxx_device(&drm->device); | 1333 | struct nvkm_device *device = nvxx_device(&drm->client.device); |
| 1331 | struct nvkm_mem *node = mem->mm_node; | 1334 | struct nvkm_mem *mem = reg->mm_node; |
| 1332 | int ret; | 1335 | int ret; |
| 1333 | 1336 | ||
| 1334 | mem->bus.addr = NULL; | 1337 | reg->bus.addr = NULL; |
| 1335 | mem->bus.offset = 0; | 1338 | reg->bus.offset = 0; |
| 1336 | mem->bus.size = mem->num_pages << PAGE_SHIFT; | 1339 | reg->bus.size = reg->num_pages << PAGE_SHIFT; |
| 1337 | mem->bus.base = 0; | 1340 | reg->bus.base = 0; |
| 1338 | mem->bus.is_iomem = false; | 1341 | reg->bus.is_iomem = false; |
| 1339 | if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) | 1342 | if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) |
| 1340 | return -EINVAL; | 1343 | return -EINVAL; |
| 1341 | switch (mem->mem_type) { | 1344 | switch (reg->mem_type) { |
| 1342 | case TTM_PL_SYSTEM: | 1345 | case TTM_PL_SYSTEM: |
| 1343 | /* System memory */ | 1346 | /* System memory */ |
| 1344 | return 0; | 1347 | return 0; |
| 1345 | case TTM_PL_TT: | 1348 | case TTM_PL_TT: |
| 1346 | #if IS_ENABLED(CONFIG_AGP) | 1349 | #if IS_ENABLED(CONFIG_AGP) |
| 1347 | if (drm->agp.bridge) { | 1350 | if (drm->agp.bridge) { |
| 1348 | mem->bus.offset = mem->start << PAGE_SHIFT; | 1351 | reg->bus.offset = reg->start << PAGE_SHIFT; |
| 1349 | mem->bus.base = drm->agp.base; | 1352 | reg->bus.base = drm->agp.base; |
| 1350 | mem->bus.is_iomem = !drm->agp.cma; | 1353 | reg->bus.is_iomem = !drm->agp.cma; |
| 1351 | } | 1354 | } |
| 1352 | #endif | 1355 | #endif |
| 1353 | if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype) | 1356 | if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || !mem->memtype) |
| 1354 | /* untiled */ | 1357 | /* untiled */ |
| 1355 | break; | 1358 | break; |
| 1356 | /* fallthrough, tiled memory */ | 1359 | /* fallthrough, tiled memory */ |
| 1357 | case TTM_PL_VRAM: | 1360 | case TTM_PL_VRAM: |
| 1358 | mem->bus.offset = mem->start << PAGE_SHIFT; | 1361 | reg->bus.offset = reg->start << PAGE_SHIFT; |
| 1359 | mem->bus.base = device->func->resource_addr(device, 1); | 1362 | reg->bus.base = device->func->resource_addr(device, 1); |
| 1360 | mem->bus.is_iomem = true; | 1363 | reg->bus.is_iomem = true; |
| 1361 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { | 1364 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { |
| 1362 | struct nvkm_bar *bar = nvxx_bar(&drm->device); | 1365 | struct nvkm_bar *bar = nvxx_bar(&drm->client.device); |
| 1363 | int page_shift = 12; | 1366 | int page_shift = 12; |
| 1364 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI) | 1367 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI) |
| 1365 | page_shift = node->page_shift; | 1368 | page_shift = mem->page_shift; |
| 1366 | 1369 | ||
| 1367 | ret = nvkm_bar_umap(bar, node->size << 12, page_shift, | 1370 | ret = nvkm_bar_umap(bar, mem->size << 12, page_shift, |
| 1368 | &node->bar_vma); | 1371 | &mem->bar_vma); |
| 1369 | if (ret) | 1372 | if (ret) |
| 1370 | return ret; | 1373 | return ret; |
| 1371 | 1374 | ||
| 1372 | nvkm_vm_map(&node->bar_vma, node); | 1375 | nvkm_vm_map(&mem->bar_vma, mem); |
| 1373 | mem->bus.offset = node->bar_vma.offset; | 1376 | reg->bus.offset = mem->bar_vma.offset; |
| 1374 | } | 1377 | } |
| 1375 | break; | 1378 | break; |
| 1376 | default: | 1379 | default: |
| @@ -1380,15 +1383,15 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | |||
| 1380 | } | 1383 | } |
| 1381 | 1384 | ||
| 1382 | static void | 1385 | static void |
| 1383 | nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | 1386 | nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg) |
| 1384 | { | 1387 | { |
| 1385 | struct nvkm_mem *node = mem->mm_node; | 1388 | struct nvkm_mem *mem = reg->mm_node; |
| 1386 | 1389 | ||
| 1387 | if (!node->bar_vma.node) | 1390 | if (!mem->bar_vma.node) |
| 1388 | return; | 1391 | return; |
| 1389 | 1392 | ||
| 1390 | nvkm_vm_unmap(&node->bar_vma); | 1393 | nvkm_vm_unmap(&mem->bar_vma); |
| 1391 | nvkm_vm_put(&node->bar_vma); | 1394 | nvkm_vm_put(&mem->bar_vma); |
| 1392 | } | 1395 | } |
| 1393 | 1396 | ||
| 1394 | static int | 1397 | static int |
| @@ -1396,7 +1399,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) | |||
| 1396 | { | 1399 | { |
| 1397 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); | 1400 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
| 1398 | struct nouveau_bo *nvbo = nouveau_bo(bo); | 1401 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
| 1399 | struct nvkm_device *device = nvxx_device(&drm->device); | 1402 | struct nvkm_device *device = nvxx_device(&drm->client.device); |
| 1400 | u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT; | 1403 | u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT; |
| 1401 | int i, ret; | 1404 | int i, ret; |
| 1402 | 1405 | ||
| @@ -1404,7 +1407,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) | |||
| 1404 | * nothing to do here. | 1407 | * nothing to do here. |
| 1405 | */ | 1408 | */ |
| 1406 | if (bo->mem.mem_type != TTM_PL_VRAM) { | 1409 | if (bo->mem.mem_type != TTM_PL_VRAM) { |
| 1407 | if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || | 1410 | if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || |
| 1408 | !nouveau_bo_tile_layout(nvbo)) | 1411 | !nouveau_bo_tile_layout(nvbo)) |
| 1409 | return 0; | 1412 | return 0; |
| 1410 | 1413 | ||
| @@ -1419,7 +1422,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) | |||
| 1419 | } | 1422 | } |
| 1420 | 1423 | ||
| 1421 | /* make sure bo is in mappable vram */ | 1424 | /* make sure bo is in mappable vram */ |
| 1422 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA || | 1425 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA || |
| 1423 | bo->mem.start + bo->mem.num_pages < mappable) | 1426 | bo->mem.start + bo->mem.num_pages < mappable) |
| 1424 | return 0; | 1427 | return 0; |
| 1425 | 1428 | ||
| @@ -1461,7 +1464,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm) | |||
| 1461 | } | 1464 | } |
| 1462 | 1465 | ||
| 1463 | drm = nouveau_bdev(ttm->bdev); | 1466 | drm = nouveau_bdev(ttm->bdev); |
| 1464 | device = nvxx_device(&drm->device); | 1467 | device = nvxx_device(&drm->client.device); |
| 1465 | dev = drm->dev; | 1468 | dev = drm->dev; |
| 1466 | pdev = device->dev; | 1469 | pdev = device->dev; |
| 1467 | 1470 | ||
| @@ -1518,7 +1521,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) | |||
| 1518 | return; | 1521 | return; |
| 1519 | 1522 | ||
| 1520 | drm = nouveau_bdev(ttm->bdev); | 1523 | drm = nouveau_bdev(ttm->bdev); |
| 1521 | device = nvxx_device(&drm->device); | 1524 | device = nvxx_device(&drm->client.device); |
| 1522 | dev = drm->dev; | 1525 | dev = drm->dev; |
| 1523 | pdev = device->dev; | 1526 | pdev = device->dev; |
| 1524 | 1527 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h index e42360983229..b06a5385d6dd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.h +++ b/drivers/gpu/drm/nouveau/nouveau_bo.h | |||
| @@ -26,6 +26,8 @@ struct nouveau_bo { | |||
| 26 | struct list_head vma_list; | 26 | struct list_head vma_list; |
| 27 | unsigned page_shift; | 27 | unsigned page_shift; |
| 28 | 28 | ||
| 29 | struct nouveau_cli *cli; | ||
| 30 | |||
| 29 | u32 tile_mode; | 31 | u32 tile_mode; |
| 30 | u32 tile_flags; | 32 | u32 tile_flags; |
| 31 | struct nouveau_drm_tile *tile; | 33 | struct nouveau_drm_tile *tile; |
| @@ -69,7 +71,7 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo) | |||
| 69 | extern struct ttm_bo_driver nouveau_bo_driver; | 71 | extern struct ttm_bo_driver nouveau_bo_driver; |
| 70 | 72 | ||
| 71 | void nouveau_bo_move_init(struct nouveau_drm *); | 73 | void nouveau_bo_move_init(struct nouveau_drm *); |
| 72 | int nouveau_bo_new(struct drm_device *, int size, int align, u32 flags, | 74 | int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags, |
| 73 | u32 tile_mode, u32 tile_flags, struct sg_table *sg, | 75 | u32 tile_mode, u32 tile_flags, struct sg_table *sg, |
| 74 | struct reservation_object *robj, | 76 | struct reservation_object *robj, |
| 75 | struct nouveau_bo **); | 77 | struct nouveau_bo **); |
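The header change is the API-visible half of the rework: buffer allocation is now attributed to a struct nouveau_cli instead of a bare drm_device, and the size is a u64. Callers update mechanically; the push-buffer allocation in nouveau_chan.c below is representative:

        -	ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL, NULL,
        -			     &chan->push.buffer);
        +	ret = nouveau_bo_new(cli, size, 0, target, 0, 0, NULL, NULL,
        +			     &chan->push.buffer);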
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index f9b3c811187e..dbc41fa86ee8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c | |||
| @@ -45,10 +45,20 @@ MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM"); | |||
| 45 | int nouveau_vram_pushbuf; | 45 | int nouveau_vram_pushbuf; |
| 46 | module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); | 46 | module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); |
| 47 | 47 | ||
| 48 | static int | ||
| 49 | nouveau_channel_killed(struct nvif_notify *ntfy) | ||
| 50 | { | ||
| 51 | struct nouveau_channel *chan = container_of(ntfy, typeof(*chan), kill); | ||
| 52 | struct nouveau_cli *cli = (void *)chan->user.client; | ||
| 53 | NV_PRINTK(warn, cli, "channel %d killed!\n", chan->chid); | ||
| 54 | atomic_set(&chan->killed, 1); | ||
| 55 | return NVIF_NOTIFY_DROP; | ||
| 56 | } | ||
| 57 | |||
| 48 | int | 58 | int |
| 49 | nouveau_channel_idle(struct nouveau_channel *chan) | 59 | nouveau_channel_idle(struct nouveau_channel *chan) |
| 50 | { | 60 | { |
| 51 | if (likely(chan && chan->fence)) { | 61 | if (likely(chan && chan->fence && !atomic_read(&chan->killed))) { |
| 52 | struct nouveau_cli *cli = (void *)chan->user.client; | 62 | struct nouveau_cli *cli = (void *)chan->user.client; |
| 53 | struct nouveau_fence *fence = NULL; | 63 | struct nouveau_fence *fence = NULL; |
| 54 | int ret; | 64 | int ret; |
| @@ -78,6 +88,7 @@ nouveau_channel_del(struct nouveau_channel **pchan) | |||
| 78 | nvif_object_fini(&chan->nvsw); | 88 | nvif_object_fini(&chan->nvsw); |
| 79 | nvif_object_fini(&chan->gart); | 89 | nvif_object_fini(&chan->gart); |
| 80 | nvif_object_fini(&chan->vram); | 90 | nvif_object_fini(&chan->vram); |
| 91 | nvif_notify_fini(&chan->kill); | ||
| 81 | nvif_object_fini(&chan->user); | 92 | nvif_object_fini(&chan->user); |
| 82 | nvif_object_fini(&chan->push.ctxdma); | 93 | nvif_object_fini(&chan->push.ctxdma); |
| 83 | nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma); | 94 | nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma); |
| @@ -107,13 +118,14 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device, | |||
| 107 | 118 | ||
| 108 | chan->device = device; | 119 | chan->device = device; |
| 109 | chan->drm = drm; | 120 | chan->drm = drm; |
| 121 | atomic_set(&chan->killed, 0); | ||
| 110 | 122 | ||
| 111 | /* allocate memory for dma push buffer */ | 123 | /* allocate memory for dma push buffer */ |
| 112 | target = TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED; | 124 | target = TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED; |
| 113 | if (nouveau_vram_pushbuf) | 125 | if (nouveau_vram_pushbuf) |
| 114 | target = TTM_PL_FLAG_VRAM; | 126 | target = TTM_PL_FLAG_VRAM; |
| 115 | 127 | ||
| 116 | ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL, NULL, | 128 | ret = nouveau_bo_new(cli, size, 0, target, 0, 0, NULL, NULL, |
| 117 | &chan->push.buffer); | 129 | &chan->push.buffer); |
| 118 | if (ret == 0) { | 130 | if (ret == 0) { |
| 119 | ret = nouveau_bo_pin(chan->push.buffer, target, false); | 131 | ret = nouveau_bo_pin(chan->push.buffer, target, false); |
| @@ -301,12 +313,26 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) | |||
| 301 | { | 313 | { |
| 302 | struct nvif_device *device = chan->device; | 314 | struct nvif_device *device = chan->device; |
| 303 | struct nouveau_cli *cli = (void *)chan->user.client; | 315 | struct nouveau_cli *cli = (void *)chan->user.client; |
| 316 | struct nouveau_drm *drm = chan->drm; | ||
| 304 | struct nvkm_mmu *mmu = nvxx_mmu(device); | 317 | struct nvkm_mmu *mmu = nvxx_mmu(device); |
| 305 | struct nv_dma_v0 args = {}; | 318 | struct nv_dma_v0 args = {}; |
| 306 | int ret, i; | 319 | int ret, i; |
| 307 | 320 | ||
| 308 | nvif_object_map(&chan->user); | 321 | nvif_object_map(&chan->user); |
| 309 | 322 | ||
| 323 | if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) { | ||
| 324 | ret = nvif_notify_init(&chan->user, nouveau_channel_killed, | ||
| 325 | true, NV906F_V0_NTFY_KILLED, | ||
| 326 | NULL, 0, 0, &chan->kill); | ||
| 327 | if (ret == 0) | ||
| 328 | ret = nvif_notify_get(&chan->kill); | ||
| 329 | if (ret) { | ||
| 330 | NV_ERROR(drm, "Failed to request channel kill " | ||
| 331 | "notification: %d\n", ret); | ||
| 332 | return ret; | ||
| 333 | } | ||
| 334 | } | ||
| 335 | |||
| 310 | /* allocate dma objects to cover all allowed vram, and gart */ | 336 | /* allocate dma objects to cover all allowed vram, and gart */ |
| 311 | if (device->info.family < NV_DEVICE_INFO_V0_FERMI) { | 337 | if (device->info.family < NV_DEVICE_INFO_V0_FERMI) { |
| 312 | if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) { | 338 | if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) { |
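The new nouveau_channel_killed() handler receives only the embedded struct nvif_notify, so it recovers the owning channel with container_of(), latches the atomic killed flag, and returns NVIF_NOTIFY_DROP; nouveau_channel_idle() then refuses to wait on a dead channel. A minimal standalone sketch of that recovery pattern (a plain int stands in for atomic_t, and the notify type is a hypothetical stub):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct notify { int unused; };             /* stub for struct nvif_notify */

struct channel {
	int chid;
	struct notify kill;                /* embedded, as in struct nouveau_channel */
	int killed;                        /* atomic_t in the real driver */
};

/* The callback only sees the embedded notify; walk back to the channel. */
static int channel_killed(struct notify *ntfy)
{
	struct channel *chan = container_of(ntfy, struct channel, kill);

	printf("channel %d killed!\n", chan->chid);
	chan->killed = 1;                  /* atomic_set() in the real driver */
	return 0;                          /* NVIF_NOTIFY_DROP upstream */
}

int main(void)
{
	struct channel chan = { .chid = 3, .killed = 0 };

	channel_killed(&chan.kill);
	return chan.killed ? 0 : 1;
}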
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h index 48062c94f36d..46b947ba1cf4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.h +++ b/drivers/gpu/drm/nouveau/nouveau_chan.h | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | #ifndef __NOUVEAU_CHAN_H__ | 1 | #ifndef __NOUVEAU_CHAN_H__ |
| 2 | #define __NOUVEAU_CHAN_H__ | 2 | #define __NOUVEAU_CHAN_H__ |
| 3 | |||
| 4 | #include <nvif/object.h> | 3 | #include <nvif/object.h> |
| 4 | #include <nvif/notify.h> | ||
| 5 | struct nvif_device; | 5 | struct nvif_device; |
| 6 | 6 | ||
| 7 | struct nouveau_channel { | 7 | struct nouveau_channel { |
| @@ -38,6 +38,9 @@ struct nouveau_channel { | |||
| 38 | u32 user_put; | 38 | u32 user_put; |
| 39 | 39 | ||
| 40 | struct nvif_object user; | 40 | struct nvif_object user; |
| 41 | |||
| 42 | struct nvif_notify kill; | ||
| 43 | atomic_t killed; | ||
| 41 | }; | 44 | }; |
| 42 | 45 | ||
| 43 | 46 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 966d20ab4de4..f5add64c093f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
| @@ -419,7 +419,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector) | |||
| 419 | struct drm_device *dev = connector->dev; | 419 | struct drm_device *dev = connector->dev; |
| 420 | struct nouveau_connector *nv_connector = nouveau_connector(connector); | 420 | struct nouveau_connector *nv_connector = nouveau_connector(connector); |
| 421 | struct nouveau_drm *drm = nouveau_drm(dev); | 421 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 422 | struct nvkm_gpio *gpio = nvxx_gpio(&drm->device); | 422 | struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device); |
| 423 | struct nouveau_encoder *nv_encoder; | 423 | struct nouveau_encoder *nv_encoder; |
| 424 | struct drm_encoder *encoder; | 424 | struct drm_encoder *encoder; |
| 425 | int i, panel = -ENODEV; | 425 | int i, panel = -ENODEV; |
| @@ -521,7 +521,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector, | |||
| 521 | return; | 521 | return; |
| 522 | nv_connector->detected_encoder = nv_encoder; | 522 | nv_connector->detected_encoder = nv_encoder; |
| 523 | 523 | ||
| 524 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { | 524 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { |
| 525 | connector->interlace_allowed = true; | 525 | connector->interlace_allowed = true; |
| 526 | connector->doublescan_allowed = true; | 526 | connector->doublescan_allowed = true; |
| 527 | } else | 527 | } else |
| @@ -531,8 +531,8 @@ nouveau_connector_set_encoder(struct drm_connector *connector, | |||
| 531 | connector->interlace_allowed = false; | 531 | connector->interlace_allowed = false; |
| 532 | } else { | 532 | } else { |
| 533 | connector->doublescan_allowed = true; | 533 | connector->doublescan_allowed = true; |
| 534 | if (drm->device.info.family == NV_DEVICE_INFO_V0_KELVIN || | 534 | if (drm->client.device.info.family == NV_DEVICE_INFO_V0_KELVIN || |
| 535 | (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS && | 535 | (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS && |
| 536 | (dev->pdev->device & 0x0ff0) != 0x0100 && | 536 | (dev->pdev->device & 0x0ff0) != 0x0100 && |
| 537 | (dev->pdev->device & 0x0ff0) != 0x0150)) | 537 | (dev->pdev->device & 0x0ff0) != 0x0150)) |
| 538 | /* HW is broken */ | 538 | /* HW is broken */ |
| @@ -984,17 +984,17 @@ get_tmds_link_bandwidth(struct drm_connector *connector, bool hdmi) | |||
| 984 | /* Note: these limits are conservative, some Fermis | 984 | /* Note: these limits are conservative, some Fermis |
| 985 | * can do 297 MHz. Unclear how this can be determined. | 985 | * can do 297 MHz. Unclear how this can be determined. |
| 986 | */ | 986 | */ |
| 987 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_KEPLER) | 987 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_KEPLER) |
| 988 | return 297000; | 988 | return 297000; |
| 989 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI) | 989 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI) |
| 990 | return 225000; | 990 | return 225000; |
| 991 | } | 991 | } |
| 992 | if (dcb->location != DCB_LOC_ON_CHIP || | 992 | if (dcb->location != DCB_LOC_ON_CHIP || |
| 993 | drm->device.info.chipset >= 0x46) | 993 | drm->client.device.info.chipset >= 0x46) |
| 994 | return 165000; | 994 | return 165000; |
| 995 | else if (drm->device.info.chipset >= 0x40) | 995 | else if (drm->client.device.info.chipset >= 0x40) |
| 996 | return 155000; | 996 | return 155000; |
| 997 | else if (drm->device.info.chipset >= 0x18) | 997 | else if (drm->client.device.info.chipset >= 0x18) |
| 998 | return 135000; | 998 | return 135000; |
| 999 | else | 999 | else |
| 1000 | return 112000; | 1000 | return 112000; |
| @@ -1041,7 +1041,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector, | |||
| 1041 | clock = clock * (connector->display_info.bpc * 3) / 10; | 1041 | clock = clock * (connector->display_info.bpc * 3) / 10; |
| 1042 | break; | 1042 | break; |
| 1043 | default: | 1043 | default: |
| 1044 | BUG_ON(1); | 1044 | BUG(); |
| 1045 | return MODE_BAD; | 1045 | return MODE_BAD; |
| 1046 | } | 1046 | } |
| 1047 | 1047 | ||
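The get_tmds_link_bandwidth() hunk only renames drm->device to drm->client.device, but the decision ladder it touches is easy to misread in diff form. Restated as one standalone function (the family enum is a hypothetical stand-in for the NV_DEVICE_INFO_V0_* constants; returns are in kHz, matching the driver, and the sketch assumes the 297/225 MHz returns sit under the function's hdmi check, as the surrounding context suggests):

enum family { CELSIUS, KELVIN, TESLA, FERMI, KEPLER };

static int tmds_link_bandwidth_khz(enum family fam, int chipset,
				   int hdmi, int dcb_on_chip)
{
	if (hdmi) {
		if (fam >= KEPLER)
			return 297000;
		if (fam >= FERMI)
			return 225000;
	}
	if (!dcb_on_chip || chipset >= 0x46)
		return 165000;
	else if (chipset >= 0x40)
		return 155000;
	else if (chipset >= 0x18)
		return 135000;
	else
		return 112000;
}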
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index 411c12cdb249..fd64dfdc7d4f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c | |||
| @@ -259,8 +259,9 @@ nouveau_debugfs_init(struct nouveau_drm *drm) | |||
| 259 | if (!drm->debugfs) | 259 | if (!drm->debugfs) |
| 260 | return -ENOMEM; | 260 | return -ENOMEM; |
| 261 | 261 | ||
| 262 | ret = nvif_object_init(&drm->device.object, 0, NVIF_CLASS_CONTROL, | 262 | ret = nvif_object_init(&drm->client.device.object, 0, |
| 263 | NULL, 0, &drm->debugfs->ctrl); | 263 | NVIF_CLASS_CONTROL, NULL, 0, |
| 264 | &drm->debugfs->ctrl); | ||
| 264 | if (ret) | 265 | if (ret) |
| 265 | return ret; | 266 | return ret; |
| 266 | 267 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 6b570079d185..72fdba1a1c5d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
| @@ -414,7 +414,8 @@ nouveau_display_init(struct drm_device *dev) | |||
| 414 | return ret; | 414 | return ret; |
| 415 | 415 | ||
| 416 | /* enable polling for external displays */ | 416 | /* enable polling for external displays */ |
| 417 | drm_kms_helper_poll_enable(dev); | 417 | if (!dev->mode_config.poll_enabled) |
| 418 | drm_kms_helper_poll_enable(dev); | ||
| 418 | 419 | ||
| 419 | /* enable hotplug interrupts */ | 420 | /* enable hotplug interrupts */ |
| 420 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 421 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
| @@ -495,7 +496,7 @@ int | |||
| 495 | nouveau_display_create(struct drm_device *dev) | 496 | nouveau_display_create(struct drm_device *dev) |
| 496 | { | 497 | { |
| 497 | struct nouveau_drm *drm = nouveau_drm(dev); | 498 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 498 | struct nvkm_device *device = nvxx_device(&drm->device); | 499 | struct nvkm_device *device = nvxx_device(&drm->client.device); |
| 499 | struct nouveau_display *disp; | 500 | struct nouveau_display *disp; |
| 500 | int ret; | 501 | int ret; |
| 501 | 502 | ||
| @@ -512,15 +513,15 @@ nouveau_display_create(struct drm_device *dev) | |||
| 512 | 513 | ||
| 513 | dev->mode_config.min_width = 0; | 514 | dev->mode_config.min_width = 0; |
| 514 | dev->mode_config.min_height = 0; | 515 | dev->mode_config.min_height = 0; |
| 515 | if (drm->device.info.family < NV_DEVICE_INFO_V0_CELSIUS) { | 516 | if (drm->client.device.info.family < NV_DEVICE_INFO_V0_CELSIUS) { |
| 516 | dev->mode_config.max_width = 2048; | 517 | dev->mode_config.max_width = 2048; |
| 517 | dev->mode_config.max_height = 2048; | 518 | dev->mode_config.max_height = 2048; |
| 518 | } else | 519 | } else |
| 519 | if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { | 520 | if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { |
| 520 | dev->mode_config.max_width = 4096; | 521 | dev->mode_config.max_width = 4096; |
| 521 | dev->mode_config.max_height = 4096; | 522 | dev->mode_config.max_height = 4096; |
| 522 | } else | 523 | } else |
| 523 | if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI) { | 524 | if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI) { |
| 524 | dev->mode_config.max_width = 8192; | 525 | dev->mode_config.max_width = 8192; |
| 525 | dev->mode_config.max_height = 8192; | 526 | dev->mode_config.max_height = 8192; |
| 526 | } else { | 527 | } else { |
| @@ -531,7 +532,7 @@ nouveau_display_create(struct drm_device *dev) | |||
| 531 | dev->mode_config.preferred_depth = 24; | 532 | dev->mode_config.preferred_depth = 24; |
| 532 | dev->mode_config.prefer_shadow = 1; | 533 | dev->mode_config.prefer_shadow = 1; |
| 533 | 534 | ||
| 534 | if (drm->device.info.chipset < 0x11) | 535 | if (drm->client.device.info.chipset < 0x11) |
| 535 | dev->mode_config.async_page_flip = false; | 536 | dev->mode_config.async_page_flip = false; |
| 536 | else | 537 | else |
| 537 | dev->mode_config.async_page_flip = true; | 538 | dev->mode_config.async_page_flip = true; |
| @@ -558,7 +559,7 @@ nouveau_display_create(struct drm_device *dev) | |||
| 558 | int i; | 559 | int i; |
| 559 | 560 | ||
| 560 | for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) { | 561 | for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) { |
| 561 | ret = nvif_object_init(&drm->device.object, 0, | 562 | ret = nvif_object_init(&drm->client.device.object, 0, |
| 562 | oclass[i], NULL, 0, &disp->disp); | 563 | oclass[i], NULL, 0, &disp->disp); |
| 563 | } | 564 | } |
| 564 | 565 | ||
| @@ -1057,6 +1058,7 @@ int | |||
| 1057 | nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev, | 1058 | nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev, |
| 1058 | struct drm_mode_create_dumb *args) | 1059 | struct drm_mode_create_dumb *args) |
| 1059 | { | 1060 | { |
| 1061 | struct nouveau_cli *cli = nouveau_cli(file_priv); | ||
| 1060 | struct nouveau_bo *bo; | 1062 | struct nouveau_bo *bo; |
| 1061 | uint32_t domain; | 1063 | uint32_t domain; |
| 1062 | int ret; | 1064 | int ret; |
| @@ -1066,12 +1068,12 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev, | |||
| 1066 | args->size = roundup(args->size, PAGE_SIZE); | 1068 | args->size = roundup(args->size, PAGE_SIZE); |
| 1067 | 1069 | ||
| 1068 | /* Use VRAM if there is any; otherwise fall back to system memory */ | 1070 | /* Use VRAM if there is any; otherwise fall back to system memory */ |
| 1069 | if (nouveau_drm(dev)->device.info.ram_size != 0) | 1071 | if (nouveau_drm(dev)->client.device.info.ram_size != 0) |
| 1070 | domain = NOUVEAU_GEM_DOMAIN_VRAM; | 1072 | domain = NOUVEAU_GEM_DOMAIN_VRAM; |
| 1071 | else | 1073 | else |
| 1072 | domain = NOUVEAU_GEM_DOMAIN_GART; | 1074 | domain = NOUVEAU_GEM_DOMAIN_GART; |
| 1073 | 1075 | ||
| 1074 | ret = nouveau_gem_new(dev, args->size, 0, domain, 0, 0, &bo); | 1076 | ret = nouveau_gem_new(cli, args->size, 0, domain, 0, 0, &bo); |
| 1075 | if (ret) | 1077 | if (ret) |
| 1076 | return ret; | 1078 | return ret; |
| 1077 | 1079 | ||
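nouveau_display_dumb_create() now resolves the nouveau_cli from file_priv and hands it to nouveau_gem_new(), but its placement policy is untouched: round the request up to whole pages, then prefer VRAM and fall back to GART only when the device reports no VRAM at all. A small sketch of just that policy (domain constants are hypothetical stand-ins):

#include <stdint.h>

#define PAGE_SIZE   4096u
#define DOMAIN_VRAM 0x1u   /* stand-in for NOUVEAU_GEM_DOMAIN_VRAM */
#define DOMAIN_GART 0x2u   /* stand-in for NOUVEAU_GEM_DOMAIN_GART */

/* roundup(size, PAGE_SIZE), valid for power-of-two page sizes */
static uint64_t dumb_size(uint64_t size)
{
	return (size + PAGE_SIZE - 1) & ~(uint64_t)(PAGE_SIZE - 1);
}

/* VRAM if the device has any; system memory otherwise (e.g. VRAM-less Tegra) */
static uint32_t dumb_domain(uint64_t ram_size)
{
	return ram_size != 0 ? DOMAIN_VRAM : DOMAIN_GART;
}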
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 279497b15e7b..d234a3b70bad 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
| @@ -37,6 +37,8 @@ | |||
| 37 | #include <core/pci.h> | 37 | #include <core/pci.h> |
| 38 | #include <core/tegra.h> | 38 | #include <core/tegra.h> |
| 39 | 39 | ||
| 40 | #include <nvif/driver.h> | ||
| 41 | |||
| 40 | #include <nvif/class.h> | 42 | #include <nvif/class.h> |
| 41 | #include <nvif/cl0002.h> | 43 | #include <nvif/cl0002.h> |
| 42 | #include <nvif/cla06f.h> | 44 | #include <nvif/cla06f.h> |
| @@ -109,35 +111,53 @@ nouveau_name(struct drm_device *dev) | |||
| 109 | return nouveau_platform_name(dev->platformdev); | 111 | return nouveau_platform_name(dev->platformdev); |
| 110 | } | 112 | } |
| 111 | 113 | ||
| 114 | static void | ||
| 115 | nouveau_cli_fini(struct nouveau_cli *cli) | ||
| 116 | { | ||
| 117 | nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL); | ||
| 118 | usif_client_fini(cli); | ||
| 119 | nvif_device_fini(&cli->device); | ||
| 120 | nvif_client_fini(&cli->base); | ||
| 121 | } | ||
| 122 | |||
| 112 | static int | 123 | static int |
| 113 | nouveau_cli_create(struct drm_device *dev, const char *sname, | 124 | nouveau_cli_init(struct nouveau_drm *drm, const char *sname, |
| 114 | int size, void **pcli) | 125 | struct nouveau_cli *cli) |
| 115 | { | 126 | { |
| 116 | struct nouveau_cli *cli = *pcli = kzalloc(size, GFP_KERNEL); | 127 | u64 device = nouveau_name(drm->dev); |
| 117 | int ret; | 128 | int ret; |
| 118 | if (cli) { | ||
| 119 | snprintf(cli->name, sizeof(cli->name), "%s", sname); | ||
| 120 | cli->dev = dev; | ||
| 121 | 129 | ||
| 122 | ret = nvif_client_init(NULL, cli->name, nouveau_name(dev), | 130 | snprintf(cli->name, sizeof(cli->name), "%s", sname); |
| 123 | nouveau_config, nouveau_debug, | 131 | cli->dev = drm->dev; |
| 132 | mutex_init(&cli->mutex); | ||
| 133 | usif_client_init(cli); | ||
| 134 | |||
| 135 | if (cli == &drm->client) { | ||
| 136 | ret = nvif_driver_init(NULL, nouveau_config, nouveau_debug, | ||
| 137 | cli->name, device, &cli->base); | ||
| 138 | } else { | ||
| 139 | ret = nvif_client_init(&drm->client.base, cli->name, device, | ||
| 124 | &cli->base); | 140 | &cli->base); |
| 125 | if (ret == 0) { | ||
| 126 | mutex_init(&cli->mutex); | ||
| 127 | usif_client_init(cli); | ||
| 128 | } | ||
| 129 | return ret; | ||
| 130 | } | 141 | } |
| 131 | return -ENOMEM; | 142 | if (ret) { |
| 132 | } | 143 | NV_ERROR(drm, "Client allocation failed: %d\n", ret); |
| 144 | goto done; | ||
| 145 | } | ||
| 133 | 146 | ||
| 134 | static void | 147 | ret = nvif_device_init(&cli->base.object, 0, NV_DEVICE, |
| 135 | nouveau_cli_destroy(struct nouveau_cli *cli) | 148 | &(struct nv_device_v0) { |
| 136 | { | 149 | .device = ~0, |
| 137 | nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL); | 150 | }, sizeof(struct nv_device_v0), |
| 138 | nvif_client_fini(&cli->base); | 151 | &cli->device); |
| 139 | usif_client_fini(cli); | 152 | if (ret) { |
| 140 | kfree(cli); | 153 | NV_ERROR(drm, "Device allocation failed: %d\n", ret); |
| 154 | goto done; | ||
| 155 | } | ||
| 156 | |||
| 157 | done: | ||
| 158 | if (ret) | ||
| 159 | nouveau_cli_fini(cli); | ||
| 160 | return ret; | ||
| 141 | } | 161 | } |
| 142 | 162 | ||
| 143 | static void | 163 | static void |
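The nouveau_cli_create()/nouveau_cli_destroy() pair becomes nouveau_cli_init()/nouveau_cli_fini() on caller-owned storage: the DRM master client is now embedded in struct nouveau_drm, per-open clients are kzalloc()'d by their callers, and every failure inside init funnels through one done: label that tears down whatever succeeded. A compact sketch of that single-exit shape (the init steps are stand-ins for the nvif calls):

struct cli { int have_client; int have_device; };

static void cli_fini(struct cli *cli)
{
	cli->have_device = 0;              /* nvif_device_fini() stand-in */
	cli->have_client = 0;              /* nvif_client_fini() stand-in */
}

static int cli_init(struct cli *cli)
{
	int ret;

	ret = 0;                           /* nvif_client_init()/_driver_init() stand-in */
	cli->have_client = !ret;
	if (ret)
		goto done;

	ret = 0;                           /* nvif_device_init() stand-in */
	cli->have_device = !ret;
done:
	if (ret)
		cli_fini(cli);             /* undo partial init; memory stays with caller */
	return ret;
}

int main(void)
{
	struct cli cli = { 0, 0 };
	return cli_init(&cli);
}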
| @@ -161,7 +181,7 @@ nouveau_accel_fini(struct nouveau_drm *drm) | |||
| 161 | static void | 181 | static void |
| 162 | nouveau_accel_init(struct nouveau_drm *drm) | 182 | nouveau_accel_init(struct nouveau_drm *drm) |
| 163 | { | 183 | { |
| 164 | struct nvif_device *device = &drm->device; | 184 | struct nvif_device *device = &drm->client.device; |
| 165 | struct nvif_sclass *sclass; | 185 | struct nvif_sclass *sclass; |
| 166 | u32 arg0, arg1; | 186 | u32 arg0, arg1; |
| 167 | int ret, i, n; | 187 | int ret, i, n; |
| @@ -215,7 +235,7 @@ nouveau_accel_init(struct nouveau_drm *drm) | |||
| 215 | } | 235 | } |
| 216 | 236 | ||
| 217 | if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) { | 237 | if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) { |
| 218 | ret = nouveau_channel_new(drm, &drm->device, | 238 | ret = nouveau_channel_new(drm, &drm->client.device, |
| 219 | NVA06F_V0_ENGINE_CE0 | | 239 | NVA06F_V0_ENGINE_CE0 | |
| 220 | NVA06F_V0_ENGINE_CE1, | 240 | NVA06F_V0_ENGINE_CE1, |
| 221 | 0, &drm->cechan); | 241 | 0, &drm->cechan); |
| @@ -228,7 +248,7 @@ nouveau_accel_init(struct nouveau_drm *drm) | |||
| 228 | if (device->info.chipset >= 0xa3 && | 248 | if (device->info.chipset >= 0xa3 && |
| 229 | device->info.chipset != 0xaa && | 249 | device->info.chipset != 0xaa && |
| 230 | device->info.chipset != 0xac) { | 250 | device->info.chipset != 0xac) { |
| 231 | ret = nouveau_channel_new(drm, &drm->device, | 251 | ret = nouveau_channel_new(drm, &drm->client.device, |
| 232 | NvDmaFB, NvDmaTT, &drm->cechan); | 252 | NvDmaFB, NvDmaTT, &drm->cechan); |
| 233 | if (ret) | 253 | if (ret) |
| 234 | NV_ERROR(drm, "failed to create ce channel, %d\n", ret); | 254 | NV_ERROR(drm, "failed to create ce channel, %d\n", ret); |
| @@ -240,7 +260,8 @@ nouveau_accel_init(struct nouveau_drm *drm) | |||
| 240 | arg1 = NvDmaTT; | 260 | arg1 = NvDmaTT; |
| 241 | } | 261 | } |
| 242 | 262 | ||
| 243 | ret = nouveau_channel_new(drm, &drm->device, arg0, arg1, &drm->channel); | 263 | ret = nouveau_channel_new(drm, &drm->client.device, |
| 264 | arg0, arg1, &drm->channel); | ||
| 244 | if (ret) { | 265 | if (ret) { |
| 245 | NV_ERROR(drm, "failed to create kernel channel, %d\n", ret); | 266 | NV_ERROR(drm, "failed to create kernel channel, %d\n", ret); |
| 246 | nouveau_accel_fini(drm); | 267 | nouveau_accel_fini(drm); |
| @@ -280,8 +301,8 @@ nouveau_accel_init(struct nouveau_drm *drm) | |||
| 280 | } | 301 | } |
| 281 | 302 | ||
| 282 | if (device->info.family < NV_DEVICE_INFO_V0_FERMI) { | 303 | if (device->info.family < NV_DEVICE_INFO_V0_FERMI) { |
| 283 | ret = nvkm_gpuobj_new(nvxx_device(&drm->device), 32, 0, false, | 304 | ret = nvkm_gpuobj_new(nvxx_device(&drm->client.device), 32, 0, |
| 284 | NULL, &drm->notify); | 305 | false, NULL, &drm->notify); |
| 285 | if (ret) { | 306 | if (ret) { |
| 286 | NV_ERROR(drm, "failed to allocate notifier, %d\n", ret); | 307 | NV_ERROR(drm, "failed to allocate notifier, %d\n", ret); |
| 287 | nouveau_accel_fini(drm); | 308 | nouveau_accel_fini(drm); |
| @@ -407,12 +428,17 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) | |||
| 407 | struct nouveau_drm *drm; | 428 | struct nouveau_drm *drm; |
| 408 | int ret; | 429 | int ret; |
| 409 | 430 | ||
| 410 | ret = nouveau_cli_create(dev, "DRM", sizeof(*drm), (void **)&drm); | 431 | if (!(drm = kzalloc(sizeof(*drm), GFP_KERNEL))) |
| 432 | return -ENOMEM; | ||
| 433 | dev->dev_private = drm; | ||
| 434 | drm->dev = dev; | ||
| 435 | |||
| 436 | ret = nouveau_cli_init(drm, "DRM", &drm->client); | ||
| 411 | if (ret) | 437 | if (ret) |
| 412 | return ret; | 438 | return ret; |
| 413 | 439 | ||
| 414 | dev->dev_private = drm; | 440 | dev->irq_enabled = true; |
| 415 | drm->dev = dev; | 441 | |
| 416 | nvxx_client(&drm->client.base)->debug = | 442 | nvxx_client(&drm->client.base)->debug = |
| 417 | nvkm_dbgopt(nouveau_debug, "DRM"); | 443 | nvkm_dbgopt(nouveau_debug, "DRM"); |
| 418 | 444 | ||
| @@ -421,33 +447,24 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) | |||
| 421 | 447 | ||
| 422 | nouveau_get_hdmi_dev(drm); | 448 | nouveau_get_hdmi_dev(drm); |
| 423 | 449 | ||
| 424 | ret = nvif_device_init(&drm->client.base.object, 0, NV_DEVICE, | ||
| 425 | &(struct nv_device_v0) { | ||
| 426 | .device = ~0, | ||
| 427 | }, sizeof(struct nv_device_v0), | ||
| 428 | &drm->device); | ||
| 429 | if (ret) | ||
| 430 | goto fail_device; | ||
| 431 | |||
| 432 | dev->irq_enabled = true; | ||
| 433 | |||
| 434 | /* work around an odd issue on nvc1 by disabling the device's | 450 | /* work around an odd issue on nvc1 by disabling the device's |
| 435 | * nosnoop capability. hopefully won't cause issues until a | 451 | * nosnoop capability. hopefully won't cause issues until a |
| 436 | * better fix is found - assuming there is one... | 452 | * better fix is found - assuming there is one... |
| 437 | */ | 453 | */ |
| 438 | if (drm->device.info.chipset == 0xc1) | 454 | if (drm->client.device.info.chipset == 0xc1) |
| 439 | nvif_mask(&drm->device.object, 0x00088080, 0x00000800, 0x00000000); | 455 | nvif_mask(&drm->client.device.object, 0x00088080, 0x00000800, 0x00000000); |
| 440 | 456 | ||
| 441 | nouveau_vga_init(drm); | 457 | nouveau_vga_init(drm); |
| 442 | 458 | ||
| 443 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { | 459 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { |
| 444 | if (!nvxx_device(&drm->device)->mmu) { | 460 | if (!nvxx_device(&drm->client.device)->mmu) { |
| 445 | ret = -ENOSYS; | 461 | ret = -ENOSYS; |
| 446 | goto fail_device; | 462 | goto fail_device; |
| 447 | } | 463 | } |
| 448 | 464 | ||
| 449 | ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40), | 465 | ret = nvkm_vm_new(nvxx_device(&drm->client.device), |
| 450 | 0x1000, NULL, &drm->client.vm); | 466 | 0, (1ULL << 40), 0x1000, NULL, |
| 467 | &drm->client.vm); | ||
| 451 | if (ret) | 468 | if (ret) |
| 452 | goto fail_device; | 469 | goto fail_device; |
| 453 | 470 | ||
| @@ -497,8 +514,8 @@ fail_bios: | |||
| 497 | fail_ttm: | 514 | fail_ttm: |
| 498 | nouveau_vga_fini(drm); | 515 | nouveau_vga_fini(drm); |
| 499 | fail_device: | 516 | fail_device: |
| 500 | nvif_device_fini(&drm->device); | 517 | nouveau_cli_fini(&drm->client); |
| 501 | nouveau_cli_destroy(&drm->client); | 518 | kfree(drm); |
| 502 | return ret; | 519 | return ret; |
| 503 | } | 520 | } |
| 504 | 521 | ||
| @@ -527,10 +544,10 @@ nouveau_drm_unload(struct drm_device *dev) | |||
| 527 | nouveau_ttm_fini(drm); | 544 | nouveau_ttm_fini(drm); |
| 528 | nouveau_vga_fini(drm); | 545 | nouveau_vga_fini(drm); |
| 529 | 546 | ||
| 530 | nvif_device_fini(&drm->device); | ||
| 531 | if (drm->hdmi_device) | 547 | if (drm->hdmi_device) |
| 532 | pci_dev_put(drm->hdmi_device); | 548 | pci_dev_put(drm->hdmi_device); |
| 533 | nouveau_cli_destroy(&drm->client); | 549 | nouveau_cli_fini(&drm->client); |
| 550 | kfree(drm); | ||
| 534 | } | 551 | } |
| 535 | 552 | ||
| 536 | void | 553 | void |
| @@ -560,7 +577,6 @@ static int | |||
| 560 | nouveau_do_suspend(struct drm_device *dev, bool runtime) | 577 | nouveau_do_suspend(struct drm_device *dev, bool runtime) |
| 561 | { | 578 | { |
| 562 | struct nouveau_drm *drm = nouveau_drm(dev); | 579 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 563 | struct nouveau_cli *cli; | ||
| 564 | int ret; | 580 | int ret; |
| 565 | 581 | ||
| 566 | nouveau_led_suspend(dev); | 582 | nouveau_led_suspend(dev); |
| @@ -590,7 +606,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime) | |||
| 590 | goto fail_display; | 606 | goto fail_display; |
| 591 | } | 607 | } |
| 592 | 608 | ||
| 593 | NV_INFO(drm, "suspending client object trees...\n"); | 609 | NV_INFO(drm, "suspending fence...\n"); |
| 594 | if (drm->fence && nouveau_fence(drm)->suspend) { | 610 | if (drm->fence && nouveau_fence(drm)->suspend) { |
| 595 | if (!nouveau_fence(drm)->suspend(drm)) { | 611 | if (!nouveau_fence(drm)->suspend(drm)) { |
| 596 | ret = -ENOMEM; | 612 | ret = -ENOMEM; |
| @@ -598,13 +614,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime) | |||
| 598 | } | 614 | } |
| 599 | } | 615 | } |
| 600 | 616 | ||
| 601 | list_for_each_entry(cli, &drm->clients, head) { | 617 | NV_INFO(drm, "suspending object tree...\n"); |
| 602 | ret = nvif_client_suspend(&cli->base); | ||
| 603 | if (ret) | ||
| 604 | goto fail_client; | ||
| 605 | } | ||
| 606 | |||
| 607 | NV_INFO(drm, "suspending kernel object tree...\n"); | ||
| 608 | ret = nvif_client_suspend(&drm->client.base); | 618 | ret = nvif_client_suspend(&drm->client.base); |
| 609 | if (ret) | 619 | if (ret) |
| 610 | goto fail_client; | 620 | goto fail_client; |
| @@ -612,10 +622,6 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime) | |||
| 612 | return 0; | 622 | return 0; |
| 613 | 623 | ||
| 614 | fail_client: | 624 | fail_client: |
| 615 | list_for_each_entry_continue_reverse(cli, &drm->clients, head) { | ||
| 616 | nvif_client_resume(&cli->base); | ||
| 617 | } | ||
| 618 | |||
| 619 | if (drm->fence && nouveau_fence(drm)->resume) | 625 | if (drm->fence && nouveau_fence(drm)->resume) |
| 620 | nouveau_fence(drm)->resume(drm); | 626 | nouveau_fence(drm)->resume(drm); |
| 621 | 627 | ||
| @@ -631,19 +637,14 @@ static int | |||
| 631 | nouveau_do_resume(struct drm_device *dev, bool runtime) | 637 | nouveau_do_resume(struct drm_device *dev, bool runtime) |
| 632 | { | 638 | { |
| 633 | struct nouveau_drm *drm = nouveau_drm(dev); | 639 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 634 | struct nouveau_cli *cli; | ||
| 635 | 640 | ||
| 636 | NV_INFO(drm, "resuming kernel object tree...\n"); | 641 | NV_INFO(drm, "resuming object tree...\n"); |
| 637 | nvif_client_resume(&drm->client.base); | 642 | nvif_client_resume(&drm->client.base); |
| 638 | 643 | ||
| 639 | NV_INFO(drm, "resuming client object trees...\n"); | 644 | NV_INFO(drm, "resuming fence...\n"); |
| 640 | if (drm->fence && nouveau_fence(drm)->resume) | 645 | if (drm->fence && nouveau_fence(drm)->resume) |
| 641 | nouveau_fence(drm)->resume(drm); | 646 | nouveau_fence(drm)->resume(drm); |
| 642 | 647 | ||
| 643 | list_for_each_entry(cli, &drm->clients, head) { | ||
| 644 | nvif_client_resume(&cli->base); | ||
| 645 | } | ||
| 646 | |||
| 647 | nouveau_run_vbios_init(dev); | 648 | nouveau_run_vbios_init(dev); |
| 648 | 649 | ||
| 649 | if (dev->mode_config.num_crtc) { | 650 | if (dev->mode_config.num_crtc) { |
| @@ -758,7 +759,7 @@ nouveau_pmops_runtime_resume(struct device *dev) | |||
| 758 | { | 759 | { |
| 759 | struct pci_dev *pdev = to_pci_dev(dev); | 760 | struct pci_dev *pdev = to_pci_dev(dev); |
| 760 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | 761 | struct drm_device *drm_dev = pci_get_drvdata(pdev); |
| 761 | struct nvif_device *device = &nouveau_drm(drm_dev)->device; | 762 | struct nvif_device *device = &nouveau_drm(drm_dev)->client.device; |
| 762 | int ret; | 763 | int ret; |
| 763 | 764 | ||
| 764 | if (nouveau_runtime_pm == 0) | 765 | if (nouveau_runtime_pm == 0) |
| @@ -772,7 +773,10 @@ nouveau_pmops_runtime_resume(struct device *dev) | |||
| 772 | pci_set_master(pdev); | 773 | pci_set_master(pdev); |
| 773 | 774 | ||
| 774 | ret = nouveau_do_resume(drm_dev, true); | 775 | ret = nouveau_do_resume(drm_dev, true); |
| 775 | drm_kms_helper_poll_enable(drm_dev); | 776 | |
| 777 | if (!drm_dev->mode_config.poll_enabled) | ||
| 778 | drm_kms_helper_poll_enable(drm_dev); | ||
| 779 | |||
| 776 | /* do magic */ | 780 | /* do magic */ |
| 777 | nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25)); | 781 | nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25)); |
| 778 | vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); | 782 | vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); |
| @@ -841,20 +845,20 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) | |||
| 841 | get_task_comm(tmpname, current); | 845 | get_task_comm(tmpname, current); |
| 842 | snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid)); | 846 | snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid)); |
| 843 | 847 | ||
| 844 | ret = nouveau_cli_create(dev, name, sizeof(*cli), (void **)&cli); | 848 | if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) |
| 849 | return ret; | ||
| 845 | 850 | ||
| 851 | ret = nouveau_cli_init(drm, name, cli); | ||
| 846 | if (ret) | 852 | if (ret) |
| 847 | goto out_suspend; | 853 | goto done; |
| 848 | 854 | ||
| 849 | cli->base.super = false; | 855 | cli->base.super = false; |
| 850 | 856 | ||
| 851 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { | 857 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { |
| 852 | ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40), | 858 | ret = nvkm_vm_new(nvxx_device(&drm->client.device), 0, |
| 853 | 0x1000, NULL, &cli->vm); | 859 | (1ULL << 40), 0x1000, NULL, &cli->vm); |
| 854 | if (ret) { | 860 | if (ret) |
| 855 | nouveau_cli_destroy(cli); | 861 | goto done; |
| 856 | goto out_suspend; | ||
| 857 | } | ||
| 858 | 862 | ||
| 859 | nvxx_client(&cli->base)->vm = cli->vm; | 863 | nvxx_client(&cli->base)->vm = cli->vm; |
| 860 | } | 864 | } |
| @@ -865,10 +869,14 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) | |||
| 865 | list_add(&cli->head, &drm->clients); | 869 | list_add(&cli->head, &drm->clients); |
| 866 | mutex_unlock(&drm->client.mutex); | 870 | mutex_unlock(&drm->client.mutex); |
| 867 | 871 | ||
| 868 | out_suspend: | 872 | done: |
| 873 | if (ret && cli) { | ||
| 874 | nouveau_cli_fini(cli); | ||
| 875 | kfree(cli); | ||
| 876 | } | ||
| 877 | |||
| 869 | pm_runtime_mark_last_busy(dev->dev); | 878 | pm_runtime_mark_last_busy(dev->dev); |
| 870 | pm_runtime_put_autosuspend(dev->dev); | 879 | pm_runtime_put_autosuspend(dev->dev); |
| 871 | |||
| 872 | return ret; | 880 | return ret; |
| 873 | } | 881 | } |
| 874 | 882 | ||
| @@ -895,7 +903,8 @@ static void | |||
| 895 | nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv) | 903 | nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv) |
| 896 | { | 904 | { |
| 897 | struct nouveau_cli *cli = nouveau_cli(fpriv); | 905 | struct nouveau_cli *cli = nouveau_cli(fpriv); |
| 898 | nouveau_cli_destroy(cli); | 906 | nouveau_cli_fini(cli); |
| 907 | kfree(cli); | ||
| 899 | pm_runtime_mark_last_busy(dev->dev); | 908 | pm_runtime_mark_last_busy(dev->dev); |
| 900 | pm_runtime_put_autosuspend(dev->dev); | 909 | pm_runtime_put_autosuspend(dev->dev); |
| 901 | } | 910 | } |
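Per-open clients are now created with drm->client.base as their parent (see nouveau_cli_init() above), so suspending that one root client appears to cover the whole object tree; that is what lets nouveau_do_suspend() and nouveau_do_resume() drop their list_for_each_entry loops over drm->clients. A toy model of parent-propagating suspend, assuming a simple linked chain of children:

#include <stdio.h>

struct client {
	const char *name;
	struct client *child;              /* simplified: one chain, not a list */
	int suspended;
};

static void client_suspend(struct client *c)
{
	for (; c; c = c->child) {
		c->suspended = 1;
		printf("suspending %s\n", c->name);
	}
}

int main(void)
{
	struct client open1 = { "app[42]", NULL, 0 };
	struct client root  = { "DRM",     &open1, 0 };

	client_suspend(&root);             /* one call covers every client */
	return 0;
}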
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 8d5ed5bfdacb..eadec2f49ad3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
| @@ -86,14 +86,17 @@ enum nouveau_drm_handle { | |||
| 86 | 86 | ||
| 87 | struct nouveau_cli { | 87 | struct nouveau_cli { |
| 88 | struct nvif_client base; | 88 | struct nvif_client base; |
| 89 | struct drm_device *dev; | ||
| 90 | struct mutex mutex; | ||
| 91 | |||
| 92 | struct nvif_device device; | ||
| 93 | |||
| 89 | struct nvkm_vm *vm; /*XXX*/ | 94 | struct nvkm_vm *vm; /*XXX*/ |
| 90 | struct list_head head; | 95 | struct list_head head; |
| 91 | struct mutex mutex; | ||
| 92 | void *abi16; | 96 | void *abi16; |
| 93 | struct list_head objects; | 97 | struct list_head objects; |
| 94 | struct list_head notifys; | 98 | struct list_head notifys; |
| 95 | char name[32]; | 99 | char name[32]; |
| 96 | struct drm_device *dev; | ||
| 97 | }; | 100 | }; |
| 98 | 101 | ||
| 99 | static inline struct nouveau_cli * | 102 | static inline struct nouveau_cli * |
| @@ -111,7 +114,6 @@ struct nouveau_drm { | |||
| 111 | struct nouveau_cli client; | 114 | struct nouveau_cli client; |
| 112 | struct drm_device *dev; | 115 | struct drm_device *dev; |
| 113 | 116 | ||
| 114 | struct nvif_device device; | ||
| 115 | struct list_head clients; | 117 | struct list_head clients; |
| 116 | 118 | ||
| 117 | struct { | 119 | struct { |
| @@ -165,6 +167,8 @@ struct nouveau_drm { | |||
| 165 | struct backlight_device *backlight; | 167 | struct backlight_device *backlight; |
| 166 | struct list_head bl_connectors; | 168 | struct list_head bl_connectors; |
| 167 | struct work_struct hpd_work; | 169 | struct work_struct hpd_work; |
| 170 | struct work_struct fbcon_work; | ||
| 171 | int fbcon_new_state; | ||
| 168 | #ifdef CONFIG_ACPI | 172 | #ifdef CONFIG_ACPI |
| 169 | struct notifier_block acpi_nb; | 173 | struct notifier_block acpi_nb; |
| 170 | #endif | 174 | #endif |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index a9d48100e74f..2665a078b6da 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
| @@ -60,7 +60,7 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | |||
| 60 | { | 60 | { |
| 61 | struct nouveau_fbdev *fbcon = info->par; | 61 | struct nouveau_fbdev *fbcon = info->par; |
| 62 | struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); | 62 | struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); |
| 63 | struct nvif_device *device = &drm->device; | 63 | struct nvif_device *device = &drm->client.device; |
| 64 | int ret; | 64 | int ret; |
| 65 | 65 | ||
| 66 | if (info->state != FBINFO_STATE_RUNNING) | 66 | if (info->state != FBINFO_STATE_RUNNING) |
| @@ -92,7 +92,7 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image) | |||
| 92 | { | 92 | { |
| 93 | struct nouveau_fbdev *fbcon = info->par; | 93 | struct nouveau_fbdev *fbcon = info->par; |
| 94 | struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); | 94 | struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); |
| 95 | struct nvif_device *device = &drm->device; | 95 | struct nvif_device *device = &drm->client.device; |
| 96 | int ret; | 96 | int ret; |
| 97 | 97 | ||
| 98 | if (info->state != FBINFO_STATE_RUNNING) | 98 | if (info->state != FBINFO_STATE_RUNNING) |
| @@ -124,7 +124,7 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 124 | { | 124 | { |
| 125 | struct nouveau_fbdev *fbcon = info->par; | 125 | struct nouveau_fbdev *fbcon = info->par; |
| 126 | struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); | 126 | struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); |
| 127 | struct nvif_device *device = &drm->device; | 127 | struct nvif_device *device = &drm->client.device; |
| 128 | int ret; | 128 | int ret; |
| 129 | 129 | ||
| 130 | if (info->state != FBINFO_STATE_RUNNING) | 130 | if (info->state != FBINFO_STATE_RUNNING) |
| @@ -266,10 +266,10 @@ nouveau_fbcon_accel_init(struct drm_device *dev) | |||
| 266 | struct fb_info *info = fbcon->helper.fbdev; | 266 | struct fb_info *info = fbcon->helper.fbdev; |
| 267 | int ret; | 267 | int ret; |
| 268 | 268 | ||
| 269 | if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) | 269 | if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) |
| 270 | ret = nv04_fbcon_accel_init(info); | 270 | ret = nv04_fbcon_accel_init(info); |
| 271 | else | 271 | else |
| 272 | if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI) | 272 | if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI) |
| 273 | ret = nv50_fbcon_accel_init(info); | 273 | ret = nv50_fbcon_accel_init(info); |
| 274 | else | 274 | else |
| 275 | ret = nvc0_fbcon_accel_init(info); | 275 | ret = nvc0_fbcon_accel_init(info); |
| @@ -324,7 +324,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, | |||
| 324 | container_of(helper, struct nouveau_fbdev, helper); | 324 | container_of(helper, struct nouveau_fbdev, helper); |
| 325 | struct drm_device *dev = fbcon->helper.dev; | 325 | struct drm_device *dev = fbcon->helper.dev; |
| 326 | struct nouveau_drm *drm = nouveau_drm(dev); | 326 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 327 | struct nvif_device *device = &drm->device; | 327 | struct nvif_device *device = &drm->client.device; |
| 328 | struct fb_info *info; | 328 | struct fb_info *info; |
| 329 | struct nouveau_framebuffer *fb; | 329 | struct nouveau_framebuffer *fb; |
| 330 | struct nouveau_channel *chan; | 330 | struct nouveau_channel *chan; |
| @@ -341,8 +341,9 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, | |||
| 341 | mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, | 341 | mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, |
| 342 | sizes->surface_depth); | 342 | sizes->surface_depth); |
| 343 | 343 | ||
| 344 | ret = nouveau_gem_new(dev, mode_cmd.pitches[0] * mode_cmd.height, | 344 | ret = nouveau_gem_new(&drm->client, mode_cmd.pitches[0] * |
| 345 | 0, NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, &nvbo); | 345 | mode_cmd.height, 0, NOUVEAU_GEM_DOMAIN_VRAM, |
| 346 | 0, 0x0000, &nvbo); | ||
| 346 | if (ret) { | 347 | if (ret) { |
| 347 | NV_ERROR(drm, "failed to allocate framebuffer\n"); | 348 | NV_ERROR(drm, "failed to allocate framebuffer\n"); |
| 348 | goto out; | 349 | goto out; |
| @@ -471,19 +472,43 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { | |||
| 471 | .fb_probe = nouveau_fbcon_create, | 472 | .fb_probe = nouveau_fbcon_create, |
| 472 | }; | 473 | }; |
| 473 | 474 | ||
| 475 | static void | ||
| 476 | nouveau_fbcon_set_suspend_work(struct work_struct *work) | ||
| 477 | { | ||
| 478 | struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work); | ||
| 479 | int state = READ_ONCE(drm->fbcon_new_state); | ||
| 480 | |||
| 481 | if (state == FBINFO_STATE_RUNNING) | ||
| 482 | pm_runtime_get_sync(drm->dev->dev); | ||
| 483 | |||
| 484 | console_lock(); | ||
| 485 | if (state == FBINFO_STATE_RUNNING) | ||
| 486 | nouveau_fbcon_accel_restore(drm->dev); | ||
| 487 | drm_fb_helper_set_suspend(&drm->fbcon->helper, state); | ||
| 488 | if (state != FBINFO_STATE_RUNNING) | ||
| 489 | nouveau_fbcon_accel_save_disable(drm->dev); | ||
| 490 | console_unlock(); | ||
| 491 | |||
| 492 | if (state == FBINFO_STATE_RUNNING) { | ||
| 493 | pm_runtime_mark_last_busy(drm->dev->dev); | ||
| 494 | pm_runtime_put_sync(drm->dev->dev); | ||
| 495 | } | ||
| 496 | } | ||
| 497 | |||
| 474 | void | 498 | void |
| 475 | nouveau_fbcon_set_suspend(struct drm_device *dev, int state) | 499 | nouveau_fbcon_set_suspend(struct drm_device *dev, int state) |
| 476 | { | 500 | { |
| 477 | struct nouveau_drm *drm = nouveau_drm(dev); | 501 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 478 | if (drm->fbcon) { | 502 | |
| 479 | console_lock(); | 503 | if (!drm->fbcon) |
| 480 | if (state == FBINFO_STATE_RUNNING) | 504 | return; |
| 481 | nouveau_fbcon_accel_restore(dev); | 505 | |
| 482 | drm_fb_helper_set_suspend(&drm->fbcon->helper, state); | 506 | drm->fbcon_new_state = state; |
| 483 | if (state != FBINFO_STATE_RUNNING) | 507 | /* Since runtime resume can happen as a result of a sysfs operation, |
| 484 | nouveau_fbcon_accel_save_disable(dev); | 508 | * it's possible we already have the console locked. So handle fbcon |
| 485 | console_unlock(); | 509 | * init/deinit from a seperate work thread |
| 486 | } | 510 | */ |
| 511 | schedule_work(&drm->fbcon_work); | ||
| 487 | } | 512 | } |
| 488 | 513 | ||
| 489 | int | 514 | int |
| @@ -503,6 +528,7 @@ nouveau_fbcon_init(struct drm_device *dev) | |||
| 503 | return -ENOMEM; | 528 | return -ENOMEM; |
| 504 | 529 | ||
| 505 | drm->fbcon = fbcon; | 530 | drm->fbcon = fbcon; |
| 531 | INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work); | ||
| 506 | 532 | ||
| 507 | drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs); | 533 | drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs); |
| 508 | 534 | ||
| @@ -514,10 +540,10 @@ nouveau_fbcon_init(struct drm_device *dev) | |||
| 514 | if (ret) | 540 | if (ret) |
| 515 | goto fini; | 541 | goto fini; |
| 516 | 542 | ||
| 517 | if (drm->device.info.ram_size <= 32 * 1024 * 1024) | 543 | if (drm->client.device.info.ram_size <= 32 * 1024 * 1024) |
| 518 | preferred_bpp = 8; | 544 | preferred_bpp = 8; |
| 519 | else | 545 | else |
| 520 | if (drm->device.info.ram_size <= 64 * 1024 * 1024) | 546 | if (drm->client.device.info.ram_size <= 64 * 1024 * 1024) |
| 521 | preferred_bpp = 16; | 547 | preferred_bpp = 16; |
| 522 | else | 548 | else |
| 523 | preferred_bpp = 32; | 549 | preferred_bpp = 32; |
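The fbcon rework exists because, as the new comment notes, runtime resume can be triggered from a sysfs operation that may already hold the console lock; taking console_lock() inline could therefore deadlock. Instead the requested state is parked in drm->fbcon_new_state and the lock-taking half runs from fbcon_work. A minimal sketch of the handoff, with schedule_work() collapsed into a direct call:

#include <stdio.h>

struct drm { int fbcon_new_state; };

/* Runs from a workqueue in the driver, so console_lock() is safe here. */
static void fbcon_work(struct drm *drm)
{
	int state = drm->fbcon_new_state;  /* READ_ONCE() in the driver */

	/* console_lock(); apply 'state'; console_unlock(); */
	printf("applying fbcon state %d under console lock\n", state);
}

static void fbcon_set_suspend(struct drm *drm, int state)
{
	drm->fbcon_new_state = state;
	fbcon_work(drm);                   /* schedule_work() in the driver */
}

int main(void)
{
	struct drm drm = { 0 };

	fbcon_set_suspend(&drm, 1);
	return 0;
}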
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index a6126c93f215..f3e551f1aa46 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c | |||
| @@ -190,7 +190,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha | |||
| 190 | return; | 190 | return; |
| 191 | 191 | ||
| 192 | ret = nvif_notify_init(&chan->user, nouveau_fence_wait_uevent_handler, | 192 | ret = nvif_notify_init(&chan->user, nouveau_fence_wait_uevent_handler, |
| 193 | false, G82_CHANNEL_DMA_V0_NTFY_UEVENT, | 193 | false, NV826E_V0_NTFY_NON_STALL_INTERRUPT, |
| 194 | &(struct nvif_notify_uevent_req) { }, | 194 | &(struct nvif_notify_uevent_req) { }, |
| 195 | sizeof(struct nvif_notify_uevent_req), | 195 | sizeof(struct nvif_notify_uevent_req), |
| 196 | sizeof(struct nvif_notify_uevent_rep), | 196 | sizeof(struct nvif_notify_uevent_rep), |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h index ccdce1b4eec4..d5e58a38f160 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h | |||
| @@ -99,6 +99,7 @@ struct nv84_fence_priv { | |||
| 99 | struct nouveau_bo *bo; | 99 | struct nouveau_bo *bo; |
| 100 | struct nouveau_bo *bo_gart; | 100 | struct nouveau_bo *bo_gart; |
| 101 | u32 *suspend; | 101 | u32 *suspend; |
| 102 | struct mutex mutex; | ||
| 102 | }; | 103 | }; |
| 103 | 104 | ||
| 104 | int nv84_fence_context_new(struct nouveau_channel *); | 105 | int nv84_fence_context_new(struct nouveau_channel *); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 201b52b750dd..ca5397beb357 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
| @@ -175,11 +175,11 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv) | |||
| 175 | } | 175 | } |
| 176 | 176 | ||
| 177 | int | 177 | int |
| 178 | nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain, | 178 | nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain, |
| 179 | uint32_t tile_mode, uint32_t tile_flags, | 179 | uint32_t tile_mode, uint32_t tile_flags, |
| 180 | struct nouveau_bo **pnvbo) | 180 | struct nouveau_bo **pnvbo) |
| 181 | { | 181 | { |
| 182 | struct nouveau_drm *drm = nouveau_drm(dev); | 182 | struct nouveau_drm *drm = nouveau_drm(cli->dev); |
| 183 | struct nouveau_bo *nvbo; | 183 | struct nouveau_bo *nvbo; |
| 184 | u32 flags = 0; | 184 | u32 flags = 0; |
| 185 | int ret; | 185 | int ret; |
| @@ -194,7 +194,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain, | |||
| 194 | if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) | 194 | if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) |
| 195 | flags |= TTM_PL_FLAG_UNCACHED; | 195 | flags |= TTM_PL_FLAG_UNCACHED; |
| 196 | 196 | ||
| 197 | ret = nouveau_bo_new(dev, size, align, flags, tile_mode, | 197 | ret = nouveau_bo_new(cli, size, align, flags, tile_mode, |
| 198 | tile_flags, NULL, NULL, pnvbo); | 198 | tile_flags, NULL, NULL, pnvbo); |
| 199 | if (ret) | 199 | if (ret) |
| 200 | return ret; | 200 | return ret; |
| @@ -206,12 +206,12 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain, | |||
| 206 | */ | 206 | */ |
| 207 | nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM | | 207 | nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM | |
| 208 | NOUVEAU_GEM_DOMAIN_GART; | 208 | NOUVEAU_GEM_DOMAIN_GART; |
| 209 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) | 209 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) |
| 210 | nvbo->valid_domains &= domain; | 210 | nvbo->valid_domains &= domain; |
| 211 | 211 | ||
| 212 | /* Initialize the embedded gem-object. We return a single gem-reference | 212 | /* Initialize the embedded gem-object. We return a single gem-reference |
| 213 | * to the caller, instead of a normal nouveau_bo ttm reference. */ | 213 | * to the caller, instead of a normal nouveau_bo ttm reference. */ |
| 214 | ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size); | 214 | ret = drm_gem_object_init(drm->dev, &nvbo->gem, nvbo->bo.mem.size); |
| 215 | if (ret) { | 215 | if (ret) { |
| 216 | nouveau_bo_ref(NULL, pnvbo); | 216 | nouveau_bo_ref(NULL, pnvbo); |
| 217 | return -ENOMEM; | 217 | return -ENOMEM; |
| @@ -257,7 +257,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, | |||
| 257 | { | 257 | { |
| 258 | struct nouveau_drm *drm = nouveau_drm(dev); | 258 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 259 | struct nouveau_cli *cli = nouveau_cli(file_priv); | 259 | struct nouveau_cli *cli = nouveau_cli(file_priv); |
| 260 | struct nvkm_fb *fb = nvxx_fb(&drm->device); | 260 | struct nvkm_fb *fb = nvxx_fb(&drm->client.device); |
| 261 | struct drm_nouveau_gem_new *req = data; | 261 | struct drm_nouveau_gem_new *req = data; |
| 262 | struct nouveau_bo *nvbo = NULL; | 262 | struct nouveau_bo *nvbo = NULL; |
| 263 | int ret = 0; | 263 | int ret = 0; |
| @@ -267,7 +267,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, | |||
| 267 | return -EINVAL; | 267 | return -EINVAL; |
| 268 | } | 268 | } |
| 269 | 269 | ||
| 270 | ret = nouveau_gem_new(dev, req->info.size, req->align, | 270 | ret = nouveau_gem_new(cli, req->info.size, req->align, |
| 271 | req->info.domain, req->info.tile_mode, | 271 | req->info.domain, req->info.tile_mode, |
| 272 | req->info.tile_flags, &nvbo); | 272 | req->info.tile_flags, &nvbo); |
| 273 | if (ret) | 273 | if (ret) |
| @@ -496,7 +496,7 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli, | |||
| 496 | return ret; | 496 | return ret; |
| 497 | } | 497 | } |
| 498 | 498 | ||
| 499 | if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { | 499 | if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { |
| 500 | if (nvbo->bo.offset == b->presumed.offset && | 500 | if (nvbo->bo.offset == b->presumed.offset && |
| 501 | ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && | 501 | ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && |
| 502 | b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || | 502 | b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || |
| @@ -767,7 +767,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
| 767 | push[i].length); | 767 | push[i].length); |
| 768 | } | 768 | } |
| 769 | } else | 769 | } else |
| 770 | if (drm->device.info.chipset >= 0x25) { | 770 | if (drm->client.device.info.chipset >= 0x25) { |
| 771 | ret = RING_SPACE(chan, req->nr_push * 2); | 771 | ret = RING_SPACE(chan, req->nr_push * 2); |
| 772 | if (ret) { | 772 | if (ret) { |
| 773 | NV_PRINTK(err, cli, "cal_space: %d\n", ret); | 773 | NV_PRINTK(err, cli, "cal_space: %d\n", ret); |
| @@ -840,7 +840,7 @@ out_next: | |||
| 840 | req->suffix0 = 0x00000000; | 840 | req->suffix0 = 0x00000000; |
| 841 | req->suffix1 = 0x00000000; | 841 | req->suffix1 = 0x00000000; |
| 842 | } else | 842 | } else |
| 843 | if (drm->device.info.chipset >= 0x25) { | 843 | if (drm->client.device.info.chipset >= 0x25) { |
| 844 | req->suffix0 = 0x00020000; | 844 | req->suffix0 = 0x00020000; |
| 845 | req->suffix1 = 0x00000000; | 845 | req->suffix1 = 0x00000000; |
| 846 | } else { | 846 | } else { |
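One behavioral detail worth pulling out of this file's hunks: nouveau_gem_new() starts every buffer with both VRAM and GART as valid domains, and only on Tesla (nv50) and newer narrows them to what userspace requested; older chips keep both. Restated as a standalone helper (domain constants are hypothetical stand-ins):

#include <stdint.h>

#define DOMAIN_VRAM 0x1u   /* stand-in for NOUVEAU_GEM_DOMAIN_VRAM */
#define DOMAIN_GART 0x2u   /* stand-in for NOUVEAU_GEM_DOMAIN_GART */

/* Mirrors the valid_domains logic in nouveau_gem_new() above. */
static uint32_t gem_valid_domains(int nv50_or_newer, uint32_t requested)
{
	uint32_t valid = DOMAIN_VRAM | DOMAIN_GART;

	if (nv50_or_newer)
		valid &= requested;        /* Tesla+ honors the request */
	return valid;
}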
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h index 7e32da2e037a..8fa6ed9ddd3a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.h +++ b/drivers/gpu/drm/nouveau/nouveau_gem.h | |||
| @@ -16,7 +16,7 @@ nouveau_gem_object(struct drm_gem_object *gem) | |||
| 16 | } | 16 | } |
| 17 | 17 | ||
| 18 | /* nouveau_gem.c */ | 18 | /* nouveau_gem.c */ |
| 19 | extern int nouveau_gem_new(struct drm_device *, int size, int align, | 19 | extern int nouveau_gem_new(struct nouveau_cli *, u64 size, int align, |
| 20 | uint32_t domain, uint32_t tile_mode, | 20 | uint32_t domain, uint32_t tile_mode, |
| 21 | uint32_t tile_flags, struct nouveau_bo **); | 21 | uint32_t tile_flags, struct nouveau_bo **); |
| 22 | extern void nouveau_gem_object_del(struct drm_gem_object *); | 22 | extern void nouveau_gem_object_del(struct drm_gem_object *); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c index 71f764bf4cc6..23b1670c1c2f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c +++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c | |||
| @@ -43,7 +43,7 @@ nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) | |||
| 43 | { | 43 | { |
| 44 | struct drm_device *dev = dev_get_drvdata(d); | 44 | struct drm_device *dev = dev_get_drvdata(d); |
| 45 | struct nouveau_drm *drm = nouveau_drm(dev); | 45 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 46 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 46 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 47 | int temp = nvkm_therm_temp_get(therm); | 47 | int temp = nvkm_therm_temp_get(therm); |
| 48 | 48 | ||
| 49 | if (temp < 0) | 49 | if (temp < 0) |
| @@ -69,7 +69,7 @@ nouveau_hwmon_temp1_auto_point1_temp(struct device *d, | |||
| 69 | { | 69 | { |
| 70 | struct drm_device *dev = dev_get_drvdata(d); | 70 | struct drm_device *dev = dev_get_drvdata(d); |
| 71 | struct nouveau_drm *drm = nouveau_drm(dev); | 71 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 72 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 72 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 73 | 73 | ||
| 74 | return snprintf(buf, PAGE_SIZE, "%d\n", | 74 | return snprintf(buf, PAGE_SIZE, "%d\n", |
| 75 | therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST) * 1000); | 75 | therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST) * 1000); |
| @@ -81,7 +81,7 @@ nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d, | |||
| 81 | { | 81 | { |
| 82 | struct drm_device *dev = dev_get_drvdata(d); | 82 | struct drm_device *dev = dev_get_drvdata(d); |
| 83 | struct nouveau_drm *drm = nouveau_drm(dev); | 83 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 84 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 84 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 85 | long value; | 85 | long value; |
| 86 | 86 | ||
| 87 | if (kstrtol(buf, 10, &value) == -EINVAL) | 87 | if (kstrtol(buf, 10, &value) == -EINVAL) |
| @@ -102,7 +102,7 @@ nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d, | |||
| 102 | { | 102 | { |
| 103 | struct drm_device *dev = dev_get_drvdata(d); | 103 | struct drm_device *dev = dev_get_drvdata(d); |
| 104 | struct nouveau_drm *drm = nouveau_drm(dev); | 104 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 105 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 105 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 106 | 106 | ||
| 107 | return snprintf(buf, PAGE_SIZE, "%d\n", | 107 | return snprintf(buf, PAGE_SIZE, "%d\n", |
| 108 | therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000); | 108 | therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000); |
| @@ -114,7 +114,7 @@ nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d, | |||
| 114 | { | 114 | { |
| 115 | struct drm_device *dev = dev_get_drvdata(d); | 115 | struct drm_device *dev = dev_get_drvdata(d); |
| 116 | struct nouveau_drm *drm = nouveau_drm(dev); | 116 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 117 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 117 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 118 | long value; | 118 | long value; |
| 119 | 119 | ||
| 120 | if (kstrtol(buf, 10, &value) == -EINVAL) | 120 | if (kstrtol(buf, 10, &value) == -EINVAL) |
| @@ -134,7 +134,7 @@ nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf) | |||
| 134 | { | 134 | { |
| 135 | struct drm_device *dev = dev_get_drvdata(d); | 135 | struct drm_device *dev = dev_get_drvdata(d); |
| 136 | struct nouveau_drm *drm = nouveau_drm(dev); | 136 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 137 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 137 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 138 | 138 | ||
| 139 | return snprintf(buf, PAGE_SIZE, "%d\n", | 139 | return snprintf(buf, PAGE_SIZE, "%d\n", |
| 140 | therm->attr_get(therm, NVKM_THERM_ATTR_THRS_DOWN_CLK) * 1000); | 140 | therm->attr_get(therm, NVKM_THERM_ATTR_THRS_DOWN_CLK) * 1000); |
| @@ -145,7 +145,7 @@ nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a, | |||
| 145 | { | 145 | { |
| 146 | struct drm_device *dev = dev_get_drvdata(d); | 146 | struct drm_device *dev = dev_get_drvdata(d); |
| 147 | struct nouveau_drm *drm = nouveau_drm(dev); | 147 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 148 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 148 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 149 | long value; | 149 | long value; |
| 150 | 150 | ||
| 151 | if (kstrtol(buf, 10, &value) == -EINVAL) | 151 | if (kstrtol(buf, 10, &value) == -EINVAL) |
| @@ -165,7 +165,7 @@ nouveau_hwmon_max_temp_hyst(struct device *d, struct device_attribute *a, | |||
| 165 | { | 165 | { |
| 166 | struct drm_device *dev = dev_get_drvdata(d); | 166 | struct drm_device *dev = dev_get_drvdata(d); |
| 167 | struct nouveau_drm *drm = nouveau_drm(dev); | 167 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 168 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 168 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 169 | 169 | ||
| 170 | return snprintf(buf, PAGE_SIZE, "%d\n", | 170 | return snprintf(buf, PAGE_SIZE, "%d\n", |
| 171 | therm->attr_get(therm, NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST) * 1000); | 171 | therm->attr_get(therm, NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST) * 1000); |
| @@ -176,7 +176,7 @@ nouveau_hwmon_set_max_temp_hyst(struct device *d, struct device_attribute *a, | |||
| 176 | { | 176 | { |
| 177 | struct drm_device *dev = dev_get_drvdata(d); | 177 | struct drm_device *dev = dev_get_drvdata(d); |
| 178 | struct nouveau_drm *drm = nouveau_drm(dev); | 178 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 179 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 179 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 180 | long value; | 180 | long value; |
| 181 | 181 | ||
| 182 | if (kstrtol(buf, 10, &value) == -EINVAL) | 182 | if (kstrtol(buf, 10, &value) == -EINVAL) |
| @@ -197,7 +197,7 @@ nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a, | |||
| 197 | { | 197 | { |
| 198 | struct drm_device *dev = dev_get_drvdata(d); | 198 | struct drm_device *dev = dev_get_drvdata(d); |
| 199 | struct nouveau_drm *drm = nouveau_drm(dev); | 199 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 200 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 200 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 201 | 201 | ||
| 202 | return snprintf(buf, PAGE_SIZE, "%d\n", | 202 | return snprintf(buf, PAGE_SIZE, "%d\n", |
| 203 | therm->attr_get(therm, NVKM_THERM_ATTR_THRS_CRITICAL) * 1000); | 203 | therm->attr_get(therm, NVKM_THERM_ATTR_THRS_CRITICAL) * 1000); |
| @@ -209,7 +209,7 @@ nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a, | |||
| 209 | { | 209 | { |
| 210 | struct drm_device *dev = dev_get_drvdata(d); | 210 | struct drm_device *dev = dev_get_drvdata(d); |
| 211 | struct nouveau_drm *drm = nouveau_drm(dev); | 211 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 212 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 212 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 213 | long value; | 213 | long value; |
| 214 | 214 | ||
| 215 | if (kstrtol(buf, 10, &value) == -EINVAL) | 215 | if (kstrtol(buf, 10, &value) == -EINVAL) |
| @@ -230,7 +230,7 @@ nouveau_hwmon_critical_temp_hyst(struct device *d, struct device_attribute *a, | |||
| 230 | { | 230 | { |
| 231 | struct drm_device *dev = dev_get_drvdata(d); | 231 | struct drm_device *dev = dev_get_drvdata(d); |
| 232 | struct nouveau_drm *drm = nouveau_drm(dev); | 232 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 233 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 233 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 234 | 234 | ||
| 235 | return snprintf(buf, PAGE_SIZE, "%d\n", | 235 | return snprintf(buf, PAGE_SIZE, "%d\n", |
| 236 | therm->attr_get(therm, NVKM_THERM_ATTR_THRS_CRITICAL_HYST) * 1000); | 236 | therm->attr_get(therm, NVKM_THERM_ATTR_THRS_CRITICAL_HYST) * 1000); |
| @@ -243,7 +243,7 @@ nouveau_hwmon_set_critical_temp_hyst(struct device *d, | |||
| 243 | { | 243 | { |
| 244 | struct drm_device *dev = dev_get_drvdata(d); | 244 | struct drm_device *dev = dev_get_drvdata(d); |
| 245 | struct nouveau_drm *drm = nouveau_drm(dev); | 245 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 246 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 246 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 247 | long value; | 247 | long value; |
| 248 | 248 | ||
| 249 | if (kstrtol(buf, 10, &value) == -EINVAL) | 249 | if (kstrtol(buf, 10, &value) == -EINVAL) |
| @@ -263,7 +263,7 @@ nouveau_hwmon_emergency_temp(struct device *d, struct device_attribute *a, | |||
| 263 | { | 263 | { |
| 264 | struct drm_device *dev = dev_get_drvdata(d); | 264 | struct drm_device *dev = dev_get_drvdata(d); |
| 265 | struct nouveau_drm *drm = nouveau_drm(dev); | 265 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 266 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 266 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 267 | 267 | ||
| 268 | return snprintf(buf, PAGE_SIZE, "%d\n", | 268 | return snprintf(buf, PAGE_SIZE, "%d\n", |
| 269 | therm->attr_get(therm, NVKM_THERM_ATTR_THRS_SHUTDOWN) * 1000); | 269 | therm->attr_get(therm, NVKM_THERM_ATTR_THRS_SHUTDOWN) * 1000); |
| @@ -275,7 +275,7 @@ nouveau_hwmon_set_emergency_temp(struct device *d, struct device_attribute *a, | |||
| 275 | { | 275 | { |
| 276 | struct drm_device *dev = dev_get_drvdata(d); | 276 | struct drm_device *dev = dev_get_drvdata(d); |
| 277 | struct nouveau_drm *drm = nouveau_drm(dev); | 277 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 278 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 278 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 279 | long value; | 279 | long value; |
| 280 | 280 | ||
| 281 | if (kstrtol(buf, 10, &value) == -EINVAL) | 281 | if (kstrtol(buf, 10, &value) == -EINVAL) |
| @@ -296,7 +296,7 @@ nouveau_hwmon_emergency_temp_hyst(struct device *d, struct device_attribute *a, | |||
| 296 | { | 296 | { |
| 297 | struct drm_device *dev = dev_get_drvdata(d); | 297 | struct drm_device *dev = dev_get_drvdata(d); |
| 298 | struct nouveau_drm *drm = nouveau_drm(dev); | 298 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 299 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 299 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 300 | 300 | ||
| 301 | return snprintf(buf, PAGE_SIZE, "%d\n", | 301 | return snprintf(buf, PAGE_SIZE, "%d\n", |
| 302 | therm->attr_get(therm, NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST) * 1000); | 302 | therm->attr_get(therm, NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST) * 1000); |
| @@ -309,7 +309,7 @@ nouveau_hwmon_set_emergency_temp_hyst(struct device *d, | |||
| 309 | { | 309 | { |
| 310 | struct drm_device *dev = dev_get_drvdata(d); | 310 | struct drm_device *dev = dev_get_drvdata(d); |
| 311 | struct nouveau_drm *drm = nouveau_drm(dev); | 311 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 312 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 312 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 313 | long value; | 313 | long value; |
| 314 | 314 | ||
| 315 | if (kstrtol(buf, 10, &value) == -EINVAL) | 315 | if (kstrtol(buf, 10, &value) == -EINVAL) |
| @@ -349,7 +349,7 @@ nouveau_hwmon_show_fan1_input(struct device *d, struct device_attribute *attr, | |||
| 349 | { | 349 | { |
| 350 | struct drm_device *dev = dev_get_drvdata(d); | 350 | struct drm_device *dev = dev_get_drvdata(d); |
| 351 | struct nouveau_drm *drm = nouveau_drm(dev); | 351 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 352 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 352 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 353 | 353 | ||
| 354 | return snprintf(buf, PAGE_SIZE, "%d\n", nvkm_therm_fan_sense(therm)); | 354 | return snprintf(buf, PAGE_SIZE, "%d\n", nvkm_therm_fan_sense(therm)); |
| 355 | } | 355 | } |
| @@ -362,7 +362,7 @@ nouveau_hwmon_get_pwm1_enable(struct device *d, | |||
| 362 | { | 362 | { |
| 363 | struct drm_device *dev = dev_get_drvdata(d); | 363 | struct drm_device *dev = dev_get_drvdata(d); |
| 364 | struct nouveau_drm *drm = nouveau_drm(dev); | 364 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 365 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 365 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 366 | int ret; | 366 | int ret; |
| 367 | 367 | ||
| 368 | ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MODE); | 368 | ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MODE); |
| @@ -378,7 +378,7 @@ nouveau_hwmon_set_pwm1_enable(struct device *d, struct device_attribute *a, | |||
| 378 | { | 378 | { |
| 379 | struct drm_device *dev = dev_get_drvdata(d); | 379 | struct drm_device *dev = dev_get_drvdata(d); |
| 380 | struct nouveau_drm *drm = nouveau_drm(dev); | 380 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 381 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 381 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 382 | long value; | 382 | long value; |
| 383 | int ret; | 383 | int ret; |
| 384 | 384 | ||
| @@ -401,7 +401,7 @@ nouveau_hwmon_get_pwm1(struct device *d, struct device_attribute *a, char *buf) | |||
| 401 | { | 401 | { |
| 402 | struct drm_device *dev = dev_get_drvdata(d); | 402 | struct drm_device *dev = dev_get_drvdata(d); |
| 403 | struct nouveau_drm *drm = nouveau_drm(dev); | 403 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 404 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 404 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 405 | int ret; | 405 | int ret; |
| 406 | 406 | ||
| 407 | ret = therm->fan_get(therm); | 407 | ret = therm->fan_get(therm); |
| @@ -417,7 +417,7 @@ nouveau_hwmon_set_pwm1(struct device *d, struct device_attribute *a, | |||
| 417 | { | 417 | { |
| 418 | struct drm_device *dev = dev_get_drvdata(d); | 418 | struct drm_device *dev = dev_get_drvdata(d); |
| 419 | struct nouveau_drm *drm = nouveau_drm(dev); | 419 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 420 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 420 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 421 | int ret = -ENODEV; | 421 | int ret = -ENODEV; |
| 422 | long value; | 422 | long value; |
| 423 | 423 | ||
| @@ -441,7 +441,7 @@ nouveau_hwmon_get_pwm1_min(struct device *d, | |||
| 441 | { | 441 | { |
| 442 | struct drm_device *dev = dev_get_drvdata(d); | 442 | struct drm_device *dev = dev_get_drvdata(d); |
| 443 | struct nouveau_drm *drm = nouveau_drm(dev); | 443 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 444 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 444 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 445 | int ret; | 445 | int ret; |
| 446 | 446 | ||
| 447 | ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MIN_DUTY); | 447 | ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MIN_DUTY); |
| @@ -457,7 +457,7 @@ nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a, | |||
| 457 | { | 457 | { |
| 458 | struct drm_device *dev = dev_get_drvdata(d); | 458 | struct drm_device *dev = dev_get_drvdata(d); |
| 459 | struct nouveau_drm *drm = nouveau_drm(dev); | 459 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 460 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 460 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 461 | long value; | 461 | long value; |
| 462 | int ret; | 462 | int ret; |
| 463 | 463 | ||
| @@ -481,7 +481,7 @@ nouveau_hwmon_get_pwm1_max(struct device *d, | |||
| 481 | { | 481 | { |
| 482 | struct drm_device *dev = dev_get_drvdata(d); | 482 | struct drm_device *dev = dev_get_drvdata(d); |
| 483 | struct nouveau_drm *drm = nouveau_drm(dev); | 483 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 484 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 484 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 485 | int ret; | 485 | int ret; |
| 486 | 486 | ||
| 487 | ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MAX_DUTY); | 487 | ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MAX_DUTY); |
| @@ -497,7 +497,7 @@ nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a, | |||
| 497 | { | 497 | { |
| 498 | struct drm_device *dev = dev_get_drvdata(d); | 498 | struct drm_device *dev = dev_get_drvdata(d); |
| 499 | struct nouveau_drm *drm = nouveau_drm(dev); | 499 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 500 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 500 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 501 | long value; | 501 | long value; |
| 502 | int ret; | 502 | int ret; |
| 503 | 503 | ||
| @@ -521,7 +521,7 @@ nouveau_hwmon_get_in0_input(struct device *d, | |||
| 521 | { | 521 | { |
| 522 | struct drm_device *dev = dev_get_drvdata(d); | 522 | struct drm_device *dev = dev_get_drvdata(d); |
| 523 | struct nouveau_drm *drm = nouveau_drm(dev); | 523 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 524 | struct nvkm_volt *volt = nvxx_volt(&drm->device); | 524 | struct nvkm_volt *volt = nvxx_volt(&drm->client.device); |
| 525 | int ret; | 525 | int ret; |
| 526 | 526 | ||
| 527 | ret = nvkm_volt_get(volt); | 527 | ret = nvkm_volt_get(volt); |
| @@ -540,7 +540,7 @@ nouveau_hwmon_get_in0_min(struct device *d, | |||
| 540 | { | 540 | { |
| 541 | struct drm_device *dev = dev_get_drvdata(d); | 541 | struct drm_device *dev = dev_get_drvdata(d); |
| 542 | struct nouveau_drm *drm = nouveau_drm(dev); | 542 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 543 | struct nvkm_volt *volt = nvxx_volt(&drm->device); | 543 | struct nvkm_volt *volt = nvxx_volt(&drm->client.device); |
| 544 | 544 | ||
| 545 | if (!volt || !volt->min_uv) | 545 | if (!volt || !volt->min_uv) |
| 546 | return -ENODEV; | 546 | return -ENODEV; |
| @@ -557,7 +557,7 @@ nouveau_hwmon_get_in0_max(struct device *d, | |||
| 557 | { | 557 | { |
| 558 | struct drm_device *dev = dev_get_drvdata(d); | 558 | struct drm_device *dev = dev_get_drvdata(d); |
| 559 | struct nouveau_drm *drm = nouveau_drm(dev); | 559 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 560 | struct nvkm_volt *volt = nvxx_volt(&drm->device); | 560 | struct nvkm_volt *volt = nvxx_volt(&drm->client.device); |
| 561 | 561 | ||
| 562 | if (!volt || !volt->max_uv) | 562 | if (!volt || !volt->max_uv) |
| 563 | return -ENODEV; | 563 | return -ENODEV; |
| @@ -584,7 +584,7 @@ nouveau_hwmon_get_power1_input(struct device *d, struct device_attribute *a, | |||
| 584 | { | 584 | { |
| 585 | struct drm_device *dev = dev_get_drvdata(d); | 585 | struct drm_device *dev = dev_get_drvdata(d); |
| 586 | struct nouveau_drm *drm = nouveau_drm(dev); | 586 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 587 | struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->device); | 587 | struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device); |
| 588 | int result = nvkm_iccsense_read_all(iccsense); | 588 | int result = nvkm_iccsense_read_all(iccsense); |
| 589 | 589 | ||
| 590 | if (result < 0) | 590 | if (result < 0) |
| @@ -596,6 +596,32 @@ nouveau_hwmon_get_power1_input(struct device *d, struct device_attribute *a, | |||
| 596 | static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, | 596 | static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, |
| 597 | nouveau_hwmon_get_power1_input, NULL, 0); | 597 | nouveau_hwmon_get_power1_input, NULL, 0); |
| 598 | 598 | ||
| 599 | static ssize_t | ||
| 600 | nouveau_hwmon_get_power1_max(struct device *d, struct device_attribute *a, | ||
| 601 | char *buf) | ||
| 602 | { | ||
| 603 | struct drm_device *dev = dev_get_drvdata(d); | ||
| 604 | struct nouveau_drm *drm = nouveau_drm(dev); | ||
| 605 | struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device); | ||
| 606 | return sprintf(buf, "%i\n", iccsense->power_w_max); | ||
| 607 | } | ||
| 608 | |||
| 609 | static SENSOR_DEVICE_ATTR(power1_max, S_IRUGO, | ||
| 610 | nouveau_hwmon_get_power1_max, NULL, 0); | ||
| 611 | |||
| 612 | static ssize_t | ||
| 613 | nouveau_hwmon_get_power1_crit(struct device *d, struct device_attribute *a, | ||
| 614 | char *buf) | ||
| 615 | { | ||
| 616 | struct drm_device *dev = dev_get_drvdata(d); | ||
| 617 | struct nouveau_drm *drm = nouveau_drm(dev); | ||
| 618 | struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device); | ||
| 619 | return sprintf(buf, "%i\n", iccsense->power_w_crit); | ||
| 620 | } | ||
| 621 | |||
| 622 | static SENSOR_DEVICE_ATTR(power1_crit, S_IRUGO, | ||
| 623 | nouveau_hwmon_get_power1_crit, NULL, 0); | ||
| 624 | |||
| 599 | static struct attribute *hwmon_default_attributes[] = { | 625 | static struct attribute *hwmon_default_attributes[] = { |
| 600 | &sensor_dev_attr_name.dev_attr.attr, | 626 | &sensor_dev_attr_name.dev_attr.attr, |
| 601 | &sensor_dev_attr_update_rate.dev_attr.attr, | 627 | &sensor_dev_attr_update_rate.dev_attr.attr, |
| @@ -639,6 +665,12 @@ static struct attribute *hwmon_power_attributes[] = { | |||
| 639 | NULL | 665 | NULL |
| 640 | }; | 666 | }; |
| 641 | 667 | ||
| 668 | static struct attribute *hwmon_power_caps_attributes[] = { | ||
| 669 | &sensor_dev_attr_power1_max.dev_attr.attr, | ||
| 670 | &sensor_dev_attr_power1_crit.dev_attr.attr, | ||
| 671 | NULL | ||
| 672 | }; | ||
| 673 | |||
| 642 | static const struct attribute_group hwmon_default_attrgroup = { | 674 | static const struct attribute_group hwmon_default_attrgroup = { |
| 643 | .attrs = hwmon_default_attributes, | 675 | .attrs = hwmon_default_attributes, |
| 644 | }; | 676 | }; |
| @@ -657,6 +689,9 @@ static const struct attribute_group hwmon_in0_attrgroup = { | |||
| 657 | static const struct attribute_group hwmon_power_attrgroup = { | 689 | static const struct attribute_group hwmon_power_attrgroup = { |
| 658 | .attrs = hwmon_power_attributes, | 690 | .attrs = hwmon_power_attributes, |
| 659 | }; | 691 | }; |
| 692 | static const struct attribute_group hwmon_power_caps_attrgroup = { | ||
| 693 | .attrs = hwmon_power_caps_attributes, | ||
| 694 | }; | ||
| 660 | #endif | 695 | #endif |
| 661 | 696 | ||
| 662 | int | 697 | int |
| @@ -664,9 +699,9 @@ nouveau_hwmon_init(struct drm_device *dev) | |||
| 664 | { | 699 | { |
| 665 | #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) | 700 | #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) |
| 666 | struct nouveau_drm *drm = nouveau_drm(dev); | 701 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 667 | struct nvkm_therm *therm = nvxx_therm(&drm->device); | 702 | struct nvkm_therm *therm = nvxx_therm(&drm->client.device); |
| 668 | struct nvkm_volt *volt = nvxx_volt(&drm->device); | 703 | struct nvkm_volt *volt = nvxx_volt(&drm->client.device); |
| 669 | struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->device); | 704 | struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device); |
| 670 | struct nouveau_hwmon *hwmon; | 705 | struct nouveau_hwmon *hwmon; |
| 671 | struct device *hwmon_dev; | 706 | struct device *hwmon_dev; |
| 672 | int ret = 0; | 707 | int ret = 0; |
| @@ -728,8 +763,16 @@ nouveau_hwmon_init(struct drm_device *dev) | |||
| 728 | if (iccsense && iccsense->data_valid && !list_empty(&iccsense->rails)) { | 763 | if (iccsense && iccsense->data_valid && !list_empty(&iccsense->rails)) { |
| 729 | ret = sysfs_create_group(&hwmon_dev->kobj, | 764 | ret = sysfs_create_group(&hwmon_dev->kobj, |
| 730 | &hwmon_power_attrgroup); | 765 | &hwmon_power_attrgroup); |
| 766 | |||
| 731 | if (ret) | 767 | if (ret) |
| 732 | goto error; | 768 | goto error; |
| 769 | |||
| 770 | if (iccsense->power_w_max && iccsense->power_w_crit) { | ||
| 771 | ret = sysfs_create_group(&hwmon_dev->kobj, | ||
| 772 | &hwmon_power_caps_attrgroup); | ||
| 773 | if (ret) | ||
| 774 | goto error; | ||
| 775 | } | ||
| 733 | } | 776 | } |
| 734 | 777 | ||
| 735 | hwmon->hwmon = hwmon_dev; | 778 | hwmon->hwmon = hwmon_dev; |
| @@ -759,6 +802,7 @@ nouveau_hwmon_fini(struct drm_device *dev) | |||
| 759 | sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_fan_rpm_attrgroup); | 802 | sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_fan_rpm_attrgroup); |
| 760 | sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_in0_attrgroup); | 803 | sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_in0_attrgroup); |
| 761 | sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_power_attrgroup); | 804 | sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_power_attrgroup); |
| 805 | sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_power_caps_attrgroup); | ||
| 762 | 806 | ||
| 763 | hwmon_device_unregister(hwmon->hwmon); | 807 | hwmon_device_unregister(hwmon->hwmon); |
| 764 | } | 808 | } |
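Two patterns recur in the nouveau_hwmon.c hunks above: every nvxx_*() accessor now takes &drm->client.device, because the nvif device handle moved into the client structure, and the new power1_max/power1_crit attributes live in their own group so they are only published when iccsense reports both caps. A minimal sketch of that conditional attribute-group pattern, with hypothetical my_* names and a cached value standing in for real hardware access:

    #include <linux/device.h>
    #include <linux/hwmon-sysfs.h>
    #include <linux/sysfs.h>

    static int my_power_w_max;  /* stand-in for iccsense->power_w_max */

    static ssize_t my_power1_max_show(struct device *d,
                                      struct device_attribute *a, char *buf)
    {
            return sprintf(buf, "%i\n", my_power_w_max);
    }
    static SENSOR_DEVICE_ATTR(power1_max, S_IRUGO, my_power1_max_show, NULL, 0);

    static struct attribute *my_caps_attrs[] = {
            &sensor_dev_attr_power1_max.dev_attr.attr,
            NULL,
    };
    static const struct attribute_group my_caps_group = { .attrs = my_caps_attrs };

    /* Publish the group only when the cap is actually known; the fini
     * path may remove it unconditionally, since sysfs_remove_group() on
     * a group that was never created only warns. */
    static int my_register_caps(struct device *hwmon_dev)
    {
            if (!my_power_w_max)
                    return 0;
            return sysfs_create_group(&hwmon_dev->kobj, &my_caps_group);
    }

Note that sysfs hands show callbacks a PAGE_SIZE buffer, so the plain sprintf() of a single integer in the new callbacks cannot overflow it.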
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.c b/drivers/gpu/drm/nouveau/nouveau_led.c index 3e2f1b6cd4df..2c5e0628da12 100644 --- a/drivers/gpu/drm/nouveau/nouveau_led.c +++ b/drivers/gpu/drm/nouveau/nouveau_led.c | |||
| @@ -38,7 +38,7 @@ nouveau_led_get_brightness(struct led_classdev *led) | |||
| 38 | { | 38 | { |
| 39 | struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev; | 39 | struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev; |
| 40 | struct nouveau_drm *drm = nouveau_drm(drm_dev); | 40 | struct nouveau_drm *drm = nouveau_drm(drm_dev); |
| 41 | struct nvif_object *device = &drm->device.object; | 41 | struct nvif_object *device = &drm->client.device.object; |
| 42 | u32 div, duty; | 42 | u32 div, duty; |
| 43 | 43 | ||
| 44 | div = nvif_rd32(device, 0x61c880) & 0x00ffffff; | 44 | div = nvif_rd32(device, 0x61c880) & 0x00ffffff; |
| @@ -55,7 +55,7 @@ nouveau_led_set_brightness(struct led_classdev *led, enum led_brightness value) | |||
| 55 | { | 55 | { |
| 56 | struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev; | 56 | struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev; |
| 57 | struct nouveau_drm *drm = nouveau_drm(drm_dev); | 57 | struct nouveau_drm *drm = nouveau_drm(drm_dev); |
| 58 | struct nvif_object *device = &drm->device.object; | 58 | struct nvif_object *device = &drm->client.device.object; |
| 59 | 59 | ||
| 60 | u32 input_clk = 27e6; /* PDISPLAY.SOR[1].PWM is connected to the crystal */ | 60 | u32 input_clk = 27e6; /* PDISPLAY.SOR[1].PWM is connected to the crystal */ |
| 61 | u32 freq = 100; /* this is what nvidia uses and it should be good-enough */ | 61 | u32 freq = 100; /* this is what nvidia uses and it should be good-enough */ |
| @@ -78,7 +78,7 @@ int | |||
| 78 | nouveau_led_init(struct drm_device *dev) | 78 | nouveau_led_init(struct drm_device *dev) |
| 79 | { | 79 | { |
| 80 | struct nouveau_drm *drm = nouveau_drm(dev); | 80 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 81 | struct nvkm_gpio *gpio = nvxx_gpio(&drm->device); | 81 | struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device); |
| 82 | struct dcb_gpio_func logo_led; | 82 | struct dcb_gpio_func logo_led; |
| 83 | int ret; | 83 | int ret; |
| 84 | 84 | ||
| @@ -102,6 +102,7 @@ nouveau_led_init(struct drm_device *dev) | |||
| 102 | ret = led_classdev_register(dev->dev, &drm->led->led); | 102 | ret = led_classdev_register(dev->dev, &drm->led->led); |
| 103 | if (ret) { | 103 | if (ret) { |
| 104 | kfree(drm->led); | 104 | kfree(drm->led); |
| 105 | drm->led = NULL; | ||
| 105 | return ret; | 106 | return ret; |
| 106 | } | 107 | } |
| 107 | 108 | ||
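Besides the drm->client.device path change, the hunk above hardens the error path of nouveau_led_init(): after led_classdev_register() fails and the allocation is freed, drm->led is reset to NULL so the suspend/resume/fini helpers, which test that pointer, cannot touch freed memory. A sketch of the free-and-NULL idiom under hypothetical names:

    #include <linux/leds.h>
    #include <linux/slab.h>

    struct my_led { struct led_classdev led; };

    static int my_led_init(struct device *parent, struct my_led **pled)
    {
            struct my_led *led = kzalloc(sizeof(*led), GFP_KERNEL);
            int ret;

            if (!led)
                    return -ENOMEM;
            led->led.name = "my:led";  /* a classdev needs a name */

            ret = led_classdev_register(parent, &led->led);
            if (ret) {
                    kfree(led);
                    *pled = NULL;  /* later "if (!*pled) return;" guards stay safe */
                    return ret;
            }
            *pled = led;
            return 0;
    }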
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.h b/drivers/gpu/drm/nouveau/nouveau_led.h index 187ecdb82002..21a5775028cc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_led.h +++ b/drivers/gpu/drm/nouveau/nouveau_led.h | |||
| @@ -42,7 +42,7 @@ nouveau_led(struct drm_device *dev) | |||
| 42 | } | 42 | } |
| 43 | 43 | ||
| 44 | /* nouveau_led.c */ | 44 | /* nouveau_led.c */ |
| 45 | #if IS_ENABLED(CONFIG_LEDS_CLASS) | 45 | #if IS_REACHABLE(CONFIG_LEDS_CLASS) |
| 46 | int nouveau_led_init(struct drm_device *dev); | 46 | int nouveau_led_init(struct drm_device *dev); |
| 47 | void nouveau_led_suspend(struct drm_device *dev); | 47 | void nouveau_led_suspend(struct drm_device *dev); |
| 48 | void nouveau_led_resume(struct drm_device *dev); | 48 | void nouveau_led_resume(struct drm_device *dev); |
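The IS_ENABLED() to IS_REACHABLE() switch above matters when nouveau is built in (=y) while CONFIG_LEDS_CLASS=m: IS_ENABLED() is true for both =y and =m, so built-in code would reference symbols that only exist in a module and fail to link. IS_REACHABLE() is true only when the symbols are actually linkable from this translation unit, falling back to the stub branch otherwise. The general guard pattern, with hypothetical names:

    /* From <linux/kconfig.h>: real prototypes when the LED class is
     * reachable from here, inline no-op stubs when it is not. */
    struct drm_device;

    #if IS_REACHABLE(CONFIG_LEDS_CLASS)
    int  my_led_init(struct drm_device *dev);
    void my_led_fini(struct drm_device *dev);
    #else
    static inline int  my_led_init(struct drm_device *dev) { return 0; }
    static inline void my_led_fini(struct drm_device *dev) {}
    #endif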
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c index 15f0925ea13b..b3f29b1ce9ea 100644 --- a/drivers/gpu/drm/nouveau/nouveau_nvif.c +++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c | |||
| @@ -60,20 +60,15 @@ nvkm_client_ioctl(void *priv, bool super, void *data, u32 size, void **hack) | |||
| 60 | static int | 60 | static int |
| 61 | nvkm_client_resume(void *priv) | 61 | nvkm_client_resume(void *priv) |
| 62 | { | 62 | { |
| 63 | return nvkm_client_init(priv); | 63 | struct nvkm_client *client = priv; |
| 64 | return nvkm_object_init(&client->object); | ||
| 64 | } | 65 | } |
| 65 | 66 | ||
| 66 | static int | 67 | static int |
| 67 | nvkm_client_suspend(void *priv) | 68 | nvkm_client_suspend(void *priv) |
| 68 | { | 69 | { |
| 69 | return nvkm_client_fini(priv, true); | ||
| 70 | } | ||
| 71 | |||
| 72 | static void | ||
| 73 | nvkm_client_driver_fini(void *priv) | ||
| 74 | { | ||
| 75 | struct nvkm_client *client = priv; | 70 | struct nvkm_client *client = priv; |
| 76 | nvkm_client_del(&client); | 71 | return nvkm_object_fini(&client->object, true); |
| 77 | } | 72 | } |
| 78 | 73 | ||
| 79 | static int | 74 | static int |
| @@ -108,23 +103,14 @@ static int | |||
| 108 | nvkm_client_driver_init(const char *name, u64 device, const char *cfg, | 103 | nvkm_client_driver_init(const char *name, u64 device, const char *cfg, |
| 109 | const char *dbg, void **ppriv) | 104 | const char *dbg, void **ppriv) |
| 110 | { | 105 | { |
| 111 | struct nvkm_client *client; | 106 | return nvkm_client_new(name, device, cfg, dbg, nvkm_client_ntfy, |
| 112 | int ret; | 107 | (struct nvkm_client **)ppriv); |
| 113 | |||
| 114 | ret = nvkm_client_new(name, device, cfg, dbg, &client); | ||
| 115 | *ppriv = client; | ||
| 116 | if (ret) | ||
| 117 | return ret; | ||
| 118 | |||
| 119 | client->ntfy = nvkm_client_ntfy; | ||
| 120 | return 0; | ||
| 121 | } | 108 | } |
| 122 | 109 | ||
| 123 | const struct nvif_driver | 110 | const struct nvif_driver |
| 124 | nvif_driver_nvkm = { | 111 | nvif_driver_nvkm = { |
| 125 | .name = "nvkm", | 112 | .name = "nvkm", |
| 126 | .init = nvkm_client_driver_init, | 113 | .init = nvkm_client_driver_init, |
| 127 | .fini = nvkm_client_driver_fini, | ||
| 128 | .suspend = nvkm_client_suspend, | 114 | .suspend = nvkm_client_suspend, |
| 129 | .resume = nvkm_client_resume, | 115 | .resume = nvkm_client_resume, |
| 130 | .ioctl = nvkm_client_ioctl, | 116 | .ioctl = nvkm_client_ioctl, |
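The nouveau_nvif.c hunks shrink the driver glue: suspend/resume now fini/init only the client's root object instead of tearing the whole client down, the separate .fini callback is dropped, and nvkm_client_new() takes the notify handler plus the result pointer directly, collapsing the init wrapper to a single call. A sketch of that callback shape, with all my_* types and helpers hypothetical:

    #include <linux/types.h>

    struct my_object { bool live; };

    static int my_object_fini(struct my_object *obj, bool suspend)
    {
            obj->live = false;  /* stand-in for real quiesce work */
            return 0;
    }

    struct my_client { struct my_object object; };

    /* The ops receive an opaque priv pointer; cast it back to the client
     * and touch only its root object, keeping the allocation alive
     * across suspend/resume. */
    static int my_client_suspend(void *priv)
    {
            struct my_client *client = priv;
            return my_object_fini(&client->object, true);
    }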
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c index a0a9704cfe2b..1fefc93af1d7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_prime.c +++ b/drivers/gpu/drm/nouveau/nouveau_prime.c | |||
| @@ -60,6 +60,7 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev, | |||
| 60 | struct dma_buf_attachment *attach, | 60 | struct dma_buf_attachment *attach, |
| 61 | struct sg_table *sg) | 61 | struct sg_table *sg) |
| 62 | { | 62 | { |
| 63 | struct nouveau_drm *drm = nouveau_drm(dev); | ||
| 63 | struct nouveau_bo *nvbo; | 64 | struct nouveau_bo *nvbo; |
| 64 | struct reservation_object *robj = attach->dmabuf->resv; | 65 | struct reservation_object *robj = attach->dmabuf->resv; |
| 65 | u32 flags = 0; | 66 | u32 flags = 0; |
| @@ -68,7 +69,7 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev, | |||
| 68 | flags = TTM_PL_FLAG_TT; | 69 | flags = TTM_PL_FLAG_TT; |
| 69 | 70 | ||
| 70 | ww_mutex_lock(&robj->lock, NULL); | 71 | ww_mutex_lock(&robj->lock, NULL); |
| 71 | ret = nouveau_bo_new(dev, attach->dmabuf->size, 0, flags, 0, 0, | 72 | ret = nouveau_bo_new(&drm->client, attach->dmabuf->size, 0, flags, 0, 0, |
| 72 | sg, robj, &nvbo); | 73 | sg, robj, &nvbo); |
| 73 | ww_mutex_unlock(&robj->lock); | 74 | ww_mutex_unlock(&robj->lock); |
| 74 | if (ret) | 75 | if (ret) |
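nouveau_bo_new() changed signature from taking the drm_device to taking a nouveau_cli, so the PRIME import path above first derives drm from dev and passes &drm->client. The surrounding locking is unchanged: the allocation happens under the dma-buf's reservation ww_mutex, taken without an acquire context. A reduced sketch of that bracket, with my_alloc purely hypothetical:

    #include <linux/ww_mutex.h>

    static int my_import(struct ww_mutex *resv, int (*my_alloc)(void))
    {
            int ret;

            /* NULL acquire context: a single lock, so the wound/wait
             * protocol is unused and -EDEADLK cannot occur here. */
            ww_mutex_lock(resv, NULL);
            ret = my_alloc();
            ww_mutex_unlock(resv);
            return ret;
    }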
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index db35ab5883ac..b7ab268f7d6f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c | |||
| @@ -24,10 +24,10 @@ nouveau_sgdma_destroy(struct ttm_tt *ttm) | |||
| 24 | } | 24 | } |
| 25 | 25 | ||
| 26 | static int | 26 | static int |
| 27 | nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) | 27 | nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg) |
| 28 | { | 28 | { |
| 29 | struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; | 29 | struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; |
| 30 | struct nvkm_mem *node = mem->mm_node; | 30 | struct nvkm_mem *node = reg->mm_node; |
| 31 | 31 | ||
| 32 | if (ttm->sg) { | 32 | if (ttm->sg) { |
| 33 | node->sg = ttm->sg; | 33 | node->sg = ttm->sg; |
| @@ -36,7 +36,7 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) | |||
| 36 | node->sg = NULL; | 36 | node->sg = NULL; |
| 37 | node->pages = nvbe->ttm.dma_address; | 37 | node->pages = nvbe->ttm.dma_address; |
| 38 | } | 38 | } |
| 39 | node->size = (mem->num_pages << PAGE_SHIFT) >> 12; | 39 | node->size = (reg->num_pages << PAGE_SHIFT) >> 12; |
| 40 | 40 | ||
| 41 | nvkm_vm_map(&node->vma[0], node); | 41 | nvkm_vm_map(&node->vma[0], node); |
| 42 | nvbe->node = node; | 42 | nvbe->node = node; |
| @@ -58,10 +58,10 @@ static struct ttm_backend_func nv04_sgdma_backend = { | |||
| 58 | }; | 58 | }; |
| 59 | 59 | ||
| 60 | static int | 60 | static int |
| 61 | nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) | 61 | nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg) |
| 62 | { | 62 | { |
| 63 | struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; | 63 | struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; |
| 64 | struct nvkm_mem *node = mem->mm_node; | 64 | struct nvkm_mem *node = reg->mm_node; |
| 65 | 65 | ||
| 66 | /* noop: bound in move_notify() */ | 66 | /* noop: bound in move_notify() */ |
| 67 | if (ttm->sg) { | 67 | if (ttm->sg) { |
| @@ -71,7 +71,7 @@ nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) | |||
| 71 | node->sg = NULL; | 71 | node->sg = NULL; |
| 72 | node->pages = nvbe->ttm.dma_address; | 72 | node->pages = nvbe->ttm.dma_address; |
| 73 | } | 73 | } |
| 74 | node->size = (mem->num_pages << PAGE_SHIFT) >> 12; | 74 | node->size = (reg->num_pages << PAGE_SHIFT) >> 12; |
| 75 | return 0; | 75 | return 0; |
| 76 | } | 76 | } |
| 77 | 77 | ||
| @@ -100,7 +100,7 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev, | |||
| 100 | if (!nvbe) | 100 | if (!nvbe) |
| 101 | return NULL; | 101 | return NULL; |
| 102 | 102 | ||
| 103 | if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) | 103 | if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) |
| 104 | nvbe->ttm.ttm.func = &nv04_sgdma_backend; | 104 | nvbe->ttm.ttm.func = &nv04_sgdma_backend; |
| 105 | else | 105 | else |
| 106 | nvbe->ttm.ttm.func = &nv50_sgdma_backend; | 106 | nvbe->ttm.ttm.func = &nv50_sgdma_backend; |
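The sgdma hunks are a mechanical rename of the ttm_mem_reg parameter from mem to reg, but the size line deserves a note: (reg->num_pages << PAGE_SHIFT) >> 12 converts a CPU-page count into bytes and then into the fixed 4 KiB units nvkm memory nodes work in, independent of the CPU page size. The equivalent helper, as a sketch:

    #include <linux/types.h>

    /* Pages -> bytes -> 4 KiB GPU units; on a 4 KiB-page CPU this is an
     * identity, on 64 KiB pages each CPU page counts as 16 units. */
    static u64 my_size_in_4k_units(u64 num_pages, unsigned int page_shift)
    {
            return (num_pages << page_shift) >> 12;
    }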
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index ec4668a41e01..13e5cc5f07fe 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c | |||
| @@ -36,7 +36,7 @@ static int | |||
| 36 | nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) | 36 | nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) |
| 37 | { | 37 | { |
| 38 | struct nouveau_drm *drm = nouveau_bdev(man->bdev); | 38 | struct nouveau_drm *drm = nouveau_bdev(man->bdev); |
| 39 | struct nvkm_fb *fb = nvxx_fb(&drm->device); | 39 | struct nvkm_fb *fb = nvxx_fb(&drm->client.device); |
| 40 | man->priv = fb; | 40 | man->priv = fb; |
| 41 | return 0; | 41 | return 0; |
| 42 | } | 42 | } |
| @@ -64,45 +64,45 @@ nvkm_mem_node_cleanup(struct nvkm_mem *node) | |||
| 64 | 64 | ||
| 65 | static void | 65 | static void |
| 66 | nouveau_vram_manager_del(struct ttm_mem_type_manager *man, | 66 | nouveau_vram_manager_del(struct ttm_mem_type_manager *man, |
| 67 | struct ttm_mem_reg *mem) | 67 | struct ttm_mem_reg *reg) |
| 68 | { | 68 | { |
| 69 | struct nouveau_drm *drm = nouveau_bdev(man->bdev); | 69 | struct nouveau_drm *drm = nouveau_bdev(man->bdev); |
| 70 | struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram; | 70 | struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram; |
| 71 | nvkm_mem_node_cleanup(mem->mm_node); | 71 | nvkm_mem_node_cleanup(reg->mm_node); |
| 72 | ram->func->put(ram, (struct nvkm_mem **)&mem->mm_node); | 72 | ram->func->put(ram, (struct nvkm_mem **)®->mm_node); |
| 73 | } | 73 | } |
| 74 | 74 | ||
| 75 | static int | 75 | static int |
| 76 | nouveau_vram_manager_new(struct ttm_mem_type_manager *man, | 76 | nouveau_vram_manager_new(struct ttm_mem_type_manager *man, |
| 77 | struct ttm_buffer_object *bo, | 77 | struct ttm_buffer_object *bo, |
| 78 | const struct ttm_place *place, | 78 | const struct ttm_place *place, |
| 79 | struct ttm_mem_reg *mem) | 79 | struct ttm_mem_reg *reg) |
| 80 | { | 80 | { |
| 81 | struct nouveau_drm *drm = nouveau_bdev(man->bdev); | 81 | struct nouveau_drm *drm = nouveau_bdev(man->bdev); |
| 82 | struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram; | 82 | struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram; |
| 83 | struct nouveau_bo *nvbo = nouveau_bo(bo); | 83 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
| 84 | struct nvkm_mem *node; | 84 | struct nvkm_mem *node; |
| 85 | u32 size_nc = 0; | 85 | u32 size_nc = 0; |
| 86 | int ret; | 86 | int ret; |
| 87 | 87 | ||
| 88 | if (drm->device.info.ram_size == 0) | 88 | if (drm->client.device.info.ram_size == 0) |
| 89 | return -ENOMEM; | 89 | return -ENOMEM; |
| 90 | 90 | ||
| 91 | if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) | 91 | if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) |
| 92 | size_nc = 1 << nvbo->page_shift; | 92 | size_nc = 1 << nvbo->page_shift; |
| 93 | 93 | ||
| 94 | ret = ram->func->get(ram, mem->num_pages << PAGE_SHIFT, | 94 | ret = ram->func->get(ram, reg->num_pages << PAGE_SHIFT, |
| 95 | mem->page_alignment << PAGE_SHIFT, size_nc, | 95 | reg->page_alignment << PAGE_SHIFT, size_nc, |
| 96 | (nvbo->tile_flags >> 8) & 0x3ff, &node); | 96 | (nvbo->tile_flags >> 8) & 0x3ff, &node); |
| 97 | if (ret) { | 97 | if (ret) { |
| 98 | mem->mm_node = NULL; | 98 | reg->mm_node = NULL; |
| 99 | return (ret == -ENOSPC) ? 0 : ret; | 99 | return (ret == -ENOSPC) ? 0 : ret; |
| 100 | } | 100 | } |
| 101 | 101 | ||
| 102 | node->page_shift = nvbo->page_shift; | 102 | node->page_shift = nvbo->page_shift; |
| 103 | 103 | ||
| 104 | mem->mm_node = node; | 104 | reg->mm_node = node; |
| 105 | mem->start = node->offset >> PAGE_SHIFT; | 105 | reg->start = node->offset >> PAGE_SHIFT; |
| 106 | return 0; | 106 | return 0; |
| 107 | } | 107 | } |
| 108 | 108 | ||
| @@ -127,18 +127,18 @@ nouveau_gart_manager_fini(struct ttm_mem_type_manager *man) | |||
| 127 | 127 | ||
| 128 | static void | 128 | static void |
| 129 | nouveau_gart_manager_del(struct ttm_mem_type_manager *man, | 129 | nouveau_gart_manager_del(struct ttm_mem_type_manager *man, |
| 130 | struct ttm_mem_reg *mem) | 130 | struct ttm_mem_reg *reg) |
| 131 | { | 131 | { |
| 132 | nvkm_mem_node_cleanup(mem->mm_node); | 132 | nvkm_mem_node_cleanup(reg->mm_node); |
| 133 | kfree(mem->mm_node); | 133 | kfree(reg->mm_node); |
| 134 | mem->mm_node = NULL; | 134 | reg->mm_node = NULL; |
| 135 | } | 135 | } |
| 136 | 136 | ||
| 137 | static int | 137 | static int |
| 138 | nouveau_gart_manager_new(struct ttm_mem_type_manager *man, | 138 | nouveau_gart_manager_new(struct ttm_mem_type_manager *man, |
| 139 | struct ttm_buffer_object *bo, | 139 | struct ttm_buffer_object *bo, |
| 140 | const struct ttm_place *place, | 140 | const struct ttm_place *place, |
| 141 | struct ttm_mem_reg *mem) | 141 | struct ttm_mem_reg *reg) |
| 142 | { | 142 | { |
| 143 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); | 143 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
| 144 | struct nouveau_bo *nvbo = nouveau_bo(bo); | 144 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
| @@ -150,7 +150,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man, | |||
| 150 | 150 | ||
| 151 | node->page_shift = 12; | 151 | node->page_shift = 12; |
| 152 | 152 | ||
| 153 | switch (drm->device.info.family) { | 153 | switch (drm->client.device.info.family) { |
| 154 | case NV_DEVICE_INFO_V0_TNT: | 154 | case NV_DEVICE_INFO_V0_TNT: |
| 155 | case NV_DEVICE_INFO_V0_CELSIUS: | 155 | case NV_DEVICE_INFO_V0_CELSIUS: |
| 156 | case NV_DEVICE_INFO_V0_KELVIN: | 156 | case NV_DEVICE_INFO_V0_KELVIN: |
| @@ -158,7 +158,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man, | |||
| 158 | case NV_DEVICE_INFO_V0_CURIE: | 158 | case NV_DEVICE_INFO_V0_CURIE: |
| 159 | break; | 159 | break; |
| 160 | case NV_DEVICE_INFO_V0_TESLA: | 160 | case NV_DEVICE_INFO_V0_TESLA: |
| 161 | if (drm->device.info.chipset != 0x50) | 161 | if (drm->client.device.info.chipset != 0x50) |
| 162 | node->memtype = (nvbo->tile_flags & 0x7f00) >> 8; | 162 | node->memtype = (nvbo->tile_flags & 0x7f00) >> 8; |
| 163 | break; | 163 | break; |
| 164 | case NV_DEVICE_INFO_V0_FERMI: | 164 | case NV_DEVICE_INFO_V0_FERMI: |
| @@ -169,12 +169,12 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man, | |||
| 169 | break; | 169 | break; |
| 170 | default: | 170 | default: |
| 171 | NV_WARN(drm, "%s: unhandled family type %x\n", __func__, | 171 | NV_WARN(drm, "%s: unhandled family type %x\n", __func__, |
| 172 | drm->device.info.family); | 172 | drm->client.device.info.family); |
| 173 | break; | 173 | break; |
| 174 | } | 174 | } |
| 175 | 175 | ||
| 176 | mem->mm_node = node; | 176 | reg->mm_node = node; |
| 177 | mem->start = 0; | 177 | reg->start = 0; |
| 178 | return 0; | 178 | return 0; |
| 179 | } | 179 | } |
| 180 | 180 | ||
| @@ -197,7 +197,7 @@ static int | |||
| 197 | nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) | 197 | nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) |
| 198 | { | 198 | { |
| 199 | struct nouveau_drm *drm = nouveau_bdev(man->bdev); | 199 | struct nouveau_drm *drm = nouveau_bdev(man->bdev); |
| 200 | struct nvkm_mmu *mmu = nvxx_mmu(&drm->device); | 200 | struct nvkm_mmu *mmu = nvxx_mmu(&drm->client.device); |
| 201 | struct nv04_mmu *priv = (void *)mmu; | 201 | struct nv04_mmu *priv = (void *)mmu; |
| 202 | struct nvkm_vm *vm = NULL; | 202 | struct nvkm_vm *vm = NULL; |
| 203 | nvkm_vm_ref(priv->vm, &vm, NULL); | 203 | nvkm_vm_ref(priv->vm, &vm, NULL); |
| @@ -215,20 +215,20 @@ nv04_gart_manager_fini(struct ttm_mem_type_manager *man) | |||
| 215 | } | 215 | } |
| 216 | 216 | ||
| 217 | static void | 217 | static void |
| 218 | nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem) | 218 | nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg) |
| 219 | { | 219 | { |
| 220 | struct nvkm_mem *node = mem->mm_node; | 220 | struct nvkm_mem *node = reg->mm_node; |
| 221 | if (node->vma[0].node) | 221 | if (node->vma[0].node) |
| 222 | nvkm_vm_put(&node->vma[0]); | 222 | nvkm_vm_put(&node->vma[0]); |
| 223 | kfree(mem->mm_node); | 223 | kfree(reg->mm_node); |
| 224 | mem->mm_node = NULL; | 224 | reg->mm_node = NULL; |
| 225 | } | 225 | } |
| 226 | 226 | ||
| 227 | static int | 227 | static int |
| 228 | nv04_gart_manager_new(struct ttm_mem_type_manager *man, | 228 | nv04_gart_manager_new(struct ttm_mem_type_manager *man, |
| 229 | struct ttm_buffer_object *bo, | 229 | struct ttm_buffer_object *bo, |
| 230 | const struct ttm_place *place, | 230 | const struct ttm_place *place, |
| 231 | struct ttm_mem_reg *mem) | 231 | struct ttm_mem_reg *reg) |
| 232 | { | 232 | { |
| 233 | struct nvkm_mem *node; | 233 | struct nvkm_mem *node; |
| 234 | int ret; | 234 | int ret; |
| @@ -239,15 +239,15 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man, | |||
| 239 | 239 | ||
| 240 | node->page_shift = 12; | 240 | node->page_shift = 12; |
| 241 | 241 | ||
| 242 | ret = nvkm_vm_get(man->priv, mem->num_pages << 12, node->page_shift, | 242 | ret = nvkm_vm_get(man->priv, reg->num_pages << 12, node->page_shift, |
| 243 | NV_MEM_ACCESS_RW, &node->vma[0]); | 243 | NV_MEM_ACCESS_RW, &node->vma[0]); |
| 244 | if (ret) { | 244 | if (ret) { |
| 245 | kfree(node); | 245 | kfree(node); |
| 246 | return ret; | 246 | return ret; |
| 247 | } | 247 | } |
| 248 | 248 | ||
| 249 | mem->mm_node = node; | 249 | reg->mm_node = node; |
| 250 | mem->start = node->vma[0].offset >> PAGE_SHIFT; | 250 | reg->start = node->vma[0].offset >> PAGE_SHIFT; |
| 251 | return 0; | 251 | return 0; |
| 252 | } | 252 | } |
| 253 | 253 | ||
| @@ -339,7 +339,7 @@ nouveau_ttm_global_release(struct nouveau_drm *drm) | |||
| 339 | int | 339 | int |
| 340 | nouveau_ttm_init(struct nouveau_drm *drm) | 340 | nouveau_ttm_init(struct nouveau_drm *drm) |
| 341 | { | 341 | { |
| 342 | struct nvkm_device *device = nvxx_device(&drm->device); | 342 | struct nvkm_device *device = nvxx_device(&drm->client.device); |
| 343 | struct nvkm_pci *pci = device->pci; | 343 | struct nvkm_pci *pci = device->pci; |
| 344 | struct drm_device *dev = drm->dev; | 344 | struct drm_device *dev = drm->dev; |
| 345 | u8 bits; | 345 | u8 bits; |
| @@ -352,8 +352,8 @@ nouveau_ttm_init(struct nouveau_drm *drm) | |||
| 352 | drm->agp.cma = pci->agp.cma; | 352 | drm->agp.cma = pci->agp.cma; |
| 353 | } | 353 | } |
| 354 | 354 | ||
| 355 | bits = nvxx_mmu(&drm->device)->dma_bits; | 355 | bits = nvxx_mmu(&drm->client.device)->dma_bits; |
| 356 | if (nvxx_device(&drm->device)->func->pci) { | 356 | if (nvxx_device(&drm->client.device)->func->pci) { |
| 357 | if (drm->agp.bridge) | 357 | if (drm->agp.bridge) |
| 358 | bits = 32; | 358 | bits = 32; |
| 359 | } else if (device->func->tegra) { | 359 | } else if (device->func->tegra) { |
| @@ -396,7 +396,7 @@ nouveau_ttm_init(struct nouveau_drm *drm) | |||
| 396 | } | 396 | } |
| 397 | 397 | ||
| 398 | /* VRAM init */ | 398 | /* VRAM init */ |
| 399 | drm->gem.vram_available = drm->device.info.ram_user; | 399 | drm->gem.vram_available = drm->client.device.info.ram_user; |
| 400 | 400 | ||
| 401 | arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1), | 401 | arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1), |
| 402 | device->func->resource_size(device, 1)); | 402 | device->func->resource_size(device, 1)); |
| @@ -413,7 +413,7 @@ nouveau_ttm_init(struct nouveau_drm *drm) | |||
| 413 | 413 | ||
| 414 | /* GART init */ | 414 | /* GART init */ |
| 415 | if (!drm->agp.bridge) { | 415 | if (!drm->agp.bridge) { |
| 416 | drm->gem.gart_available = nvxx_mmu(&drm->device)->limit; | 416 | drm->gem.gart_available = nvxx_mmu(&drm->client.device)->limit; |
| 417 | } else { | 417 | } else { |
| 418 | drm->gem.gart_available = drm->agp.size; | 418 | drm->gem.gart_available = drm->agp.size; |
| 419 | } | 419 | } |
| @@ -433,7 +433,7 @@ nouveau_ttm_init(struct nouveau_drm *drm) | |||
| 433 | void | 433 | void |
| 434 | nouveau_ttm_fini(struct nouveau_drm *drm) | 434 | nouveau_ttm_fini(struct nouveau_drm *drm) |
| 435 | { | 435 | { |
| 436 | struct nvkm_device *device = nvxx_device(&drm->device); | 436 | struct nvkm_device *device = nvxx_device(&drm->client.device); |
| 437 | 437 | ||
| 438 | ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM); | 438 | ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM); |
| 439 | ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT); | 439 | ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT); |
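Alongside the reg rename and the client.device paths, nouveau_vram_manager_new() above preserves a TTM convention that is easy to misread: on -ENOSPC it returns 0 with reg->mm_node left NULL, which tells TTM "no space here, evict and retry" rather than aborting the operation; any other error is fatal. The shape of that translation, names hypothetical:

    #include <linux/errno.h>

    static int my_manager_new(void **mm_node, int alloc_ret, void *node)
    {
            if (alloc_ret) {
                    *mm_node = NULL;
                    /* -ENOSPC is a soft failure: TTM will try eviction */
                    return (alloc_ret == -ENOSPC) ? 0 : alloc_ret;
            }
            *mm_node = node;
            return 0;
    }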
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c index 08f9c6fa0f7f..afbdbed1a690 100644 --- a/drivers/gpu/drm/nouveau/nouveau_usif.c +++ b/drivers/gpu/drm/nouveau/nouveau_usif.c | |||
| @@ -103,7 +103,7 @@ usif_notify(const void *header, u32 length, const void *data, u32 size) | |||
| 103 | } | 103 | } |
| 104 | break; | 104 | break; |
| 105 | default: | 105 | default: |
| 106 | BUG_ON(1); | 106 | BUG(); |
| 107 | break; | 107 | break; |
| 108 | } | 108 | } |
| 109 | 109 | ||
| @@ -313,7 +313,8 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc) | |||
| 313 | if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) { | 313 | if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) { |
| 314 | /* block access to objects not created via this interface */ | 314 | /* block access to objects not created via this interface */ |
| 315 | owner = argv->v0.owner; | 315 | owner = argv->v0.owner; |
| 316 | if (argv->v0.object == 0ULL) | 316 | if (argv->v0.object == 0ULL && |
| 317 | argv->v0.type != NVIF_IOCTL_V0_DEL) | ||
| 317 | argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */ | 318 | argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */ |
| 318 | else | 319 | else |
| 319 | argv->v0.owner = NVDRM_OBJECT_USIF; | 320 | argv->v0.owner = NVDRM_OBJECT_USIF; |
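Two changes above: BUG_ON(1) becomes the equivalent but idiomatic BUG(), and the owner rewrite gains a NVIF_IOCTL_V0_DEL exception, so a delete aimed at handle 0 is tagged with the restrictive USIF owner instead of the wildcard and thus, apparently, can no longer reach objects this interface did not create. The decision in isolation, with stand-in constants:

    #include <linux/types.h>

    #define MY_OWNER_ANY   0x7f  /* stand-in for NVDRM_OBJECT_ANY */
    #define MY_OWNER_USIF  0x01  /* stand-in for NVDRM_OBJECT_USIF */
    #define MY_IOCTL_DEL   0x05  /* stand-in for NVIF_IOCTL_V0_DEL */

    static u8 my_pick_owner(u64 object, u8 type)
    {
            if (object == 0ULL && type != MY_IOCTL_DEL)
                    return MY_OWNER_ANY;  /* wildcard, except the client */
            return MY_OWNER_USIF;
    }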
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c index c6a180a0c284..eef22c6b9665 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c | |||
| @@ -13,13 +13,13 @@ static unsigned int | |||
| 13 | nouveau_vga_set_decode(void *priv, bool state) | 13 | nouveau_vga_set_decode(void *priv, bool state) |
| 14 | { | 14 | { |
| 15 | struct nouveau_drm *drm = nouveau_drm(priv); | 15 | struct nouveau_drm *drm = nouveau_drm(priv); |
| 16 | struct nvif_object *device = &drm->device.object; | 16 | struct nvif_object *device = &drm->client.device.object; |
| 17 | 17 | ||
| 18 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE && | 18 | if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE && |
| 19 | drm->device.info.chipset >= 0x4c) | 19 | drm->client.device.info.chipset >= 0x4c) |
| 20 | nvif_wr32(device, 0x088060, state); | 20 | nvif_wr32(device, 0x088060, state); |
| 21 | else | 21 | else |
| 22 | if (drm->device.info.chipset >= 0x40) | 22 | if (drm->client.device.info.chipset >= 0x40) |
| 23 | nvif_wr32(device, 0x088054, state); | 23 | nvif_wr32(device, 0x088054, state); |
| 24 | else | 24 | else |
| 25 | nvif_wr32(device, 0x001854, state); | 25 | nvif_wr32(device, 0x001854, state); |
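nouveau_vga_set_decode() picks one of three registers by chipset generation before writing the decode state; only the accessor path changed, the selection logic did not. Reduced to just that selection (the addresses are copied from the hunk, everything else is stand-in):

    #include <linux/types.h>

    static u32 my_vga_decode_reg(bool family_is_curie, u32 chipset)
    {
            if (family_is_curie && chipset >= 0x4c)
                    return 0x088060;
            if (chipset >= 0x40)
                    return 0x088054;
            return 0x001854;
    }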
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index 6a2b187e3c3b..01731dbeb3d8 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c | |||
| @@ -136,7 +136,7 @@ nv04_fbcon_accel_init(struct fb_info *info) | |||
| 136 | struct drm_device *dev = nfbdev->helper.dev; | 136 | struct drm_device *dev = nfbdev->helper.dev; |
| 137 | struct nouveau_drm *drm = nouveau_drm(dev); | 137 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 138 | struct nouveau_channel *chan = drm->channel; | 138 | struct nouveau_channel *chan = drm->channel; |
| 139 | struct nvif_device *device = &drm->device; | 139 | struct nvif_device *device = &drm->client.device; |
| 140 | int surface_fmt, pattern_fmt, rect_fmt; | 140 | int surface_fmt, pattern_fmt, rect_fmt; |
| 141 | int ret; | 141 | int ret; |
| 142 | 142 | ||
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c index 79bc01111351..6477b7069e14 100644 --- a/drivers/gpu/drm/nouveau/nv17_fence.c +++ b/drivers/gpu/drm/nouveau/nv17_fence.c | |||
| @@ -76,9 +76,9 @@ nv17_fence_context_new(struct nouveau_channel *chan) | |||
| 76 | { | 76 | { |
| 77 | struct nv10_fence_priv *priv = chan->drm->fence; | 77 | struct nv10_fence_priv *priv = chan->drm->fence; |
| 78 | struct nv10_fence_chan *fctx; | 78 | struct nv10_fence_chan *fctx; |
| 79 | struct ttm_mem_reg *mem = &priv->bo->bo.mem; | 79 | struct ttm_mem_reg *reg = &priv->bo->bo.mem; |
| 80 | u32 start = mem->start * PAGE_SIZE; | 80 | u32 start = reg->start * PAGE_SIZE; |
| 81 | u32 limit = start + mem->size - 1; | 81 | u32 limit = start + reg->size - 1; |
| 82 | int ret = 0; | 82 | int ret = 0; |
| 83 | 83 | ||
| 84 | fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); | 84 | fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); |
| @@ -129,7 +129,7 @@ nv17_fence_create(struct nouveau_drm *drm) | |||
| 129 | priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); | 129 | priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); |
| 130 | spin_lock_init(&priv->lock); | 130 | spin_lock_init(&priv->lock); |
| 131 | 131 | ||
| 132 | ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, | 132 | ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM, |
| 133 | 0, 0x0000, NULL, NULL, &priv->bo); | 133 | 0, 0x0000, NULL, NULL, &priv->bo); |
| 134 | if (!ret) { | 134 | if (!ret) { |
| 135 | ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false); | 135 | ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false); |
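Beyond the reg rename and the nouveau_bo_new() client argument, the fence context above derives an inclusive DMA window from the pinned bo's placement: start is the first page offset scaled to bytes, and limit is start + size - 1. A sketch of that arithmetic under hypothetical names:

    #include <linux/types.h>

    struct my_window { u32 start, limit; };

    static struct my_window my_window_from_placement(u32 first_page,
                                                     u32 size_bytes,
                                                     u32 page_size)
    {
            struct my_window w;

            w.start = first_page * page_size;
            w.limit = w.start + size_bytes - 1;  /* inclusive upper bound */
            return w;
    }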
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 413b178c86b2..2517adbe7089 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
| @@ -447,18 +447,18 @@ nv50_dmac_ctxdma_new(struct nv50_dmac *dmac, struct nouveau_framebuffer *fb) | |||
| 447 | args.base.target = NV_DMA_V0_TARGET_VRAM; | 447 | args.base.target = NV_DMA_V0_TARGET_VRAM; |
| 448 | args.base.access = NV_DMA_V0_ACCESS_RDWR; | 448 | args.base.access = NV_DMA_V0_ACCESS_RDWR; |
| 449 | args.base.start = 0; | 449 | args.base.start = 0; |
| 450 | args.base.limit = drm->device.info.ram_user - 1; | 450 | args.base.limit = drm->client.device.info.ram_user - 1; |
| 451 | 451 | ||
| 452 | if (drm->device.info.chipset < 0x80) { | 452 | if (drm->client.device.info.chipset < 0x80) { |
| 453 | args.nv50.part = NV50_DMA_V0_PART_256; | 453 | args.nv50.part = NV50_DMA_V0_PART_256; |
| 454 | argc += sizeof(args.nv50); | 454 | argc += sizeof(args.nv50); |
| 455 | } else | 455 | } else |
| 456 | if (drm->device.info.chipset < 0xc0) { | 456 | if (drm->client.device.info.chipset < 0xc0) { |
| 457 | args.nv50.part = NV50_DMA_V0_PART_256; | 457 | args.nv50.part = NV50_DMA_V0_PART_256; |
| 458 | args.nv50.kind = kind; | 458 | args.nv50.kind = kind; |
| 459 | argc += sizeof(args.nv50); | 459 | argc += sizeof(args.nv50); |
| 460 | } else | 460 | } else |
| 461 | if (drm->device.info.chipset < 0xd0) { | 461 | if (drm->client.device.info.chipset < 0xd0) { |
| 462 | args.gf100.kind = kind; | 462 | args.gf100.kind = kind; |
| 463 | argc += sizeof(args.gf100); | 463 | argc += sizeof(args.gf100); |
| 464 | } else { | 464 | } else { |
| @@ -852,7 +852,7 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, | |||
| 852 | 852 | ||
| 853 | if (asyw->image.kind) { | 853 | if (asyw->image.kind) { |
| 854 | asyw->image.layout = 0; | 854 | asyw->image.layout = 0; |
| 855 | if (drm->device.info.chipset >= 0xc0) | 855 | if (drm->client.device.info.chipset >= 0xc0) |
| 856 | asyw->image.block = fb->nvbo->tile_mode >> 4; | 856 | asyw->image.block = fb->nvbo->tile_mode >> 4; |
| 857 | else | 857 | else |
| 858 | asyw->image.block = fb->nvbo->tile_mode; | 858 | asyw->image.block = fb->nvbo->tile_mode; |
| @@ -1404,7 +1404,7 @@ nv50_base_ntfy_wait_begun(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) | |||
| 1404 | { | 1404 | { |
| 1405 | struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev); | 1405 | struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev); |
| 1406 | struct nv50_disp *disp = nv50_disp(wndw->plane.dev); | 1406 | struct nv50_disp *disp = nv50_disp(wndw->plane.dev); |
| 1407 | if (nvif_msec(&drm->device, 2000ULL, | 1407 | if (nvif_msec(&drm->client.device, 2000ULL, |
| 1408 | u32 data = nouveau_bo_rd32(disp->sync, asyw->ntfy.offset / 4); | 1408 | u32 data = nouveau_bo_rd32(disp->sync, asyw->ntfy.offset / 4); |
| 1409 | if ((data & 0xc0000000) == 0x40000000) | 1409 | if ((data & 0xc0000000) == 0x40000000) |
| 1410 | break; | 1410 | break; |
| @@ -1529,7 +1529,7 @@ nv50_base_new(struct nouveau_drm *drm, struct nv50_head *head, | |||
| 1529 | return ret; | 1529 | return ret; |
| 1530 | } | 1530 | } |
| 1531 | 1531 | ||
| 1532 | ret = nv50_base_create(&drm->device, disp->disp, base->id, | 1532 | ret = nv50_base_create(&drm->client.device, disp->disp, base->id, |
| 1533 | disp->sync->bo.offset, &base->chan); | 1533 | disp->sync->bo.offset, &base->chan); |
| 1534 | if (ret) | 1534 | if (ret) |
| 1535 | return ret; | 1535 | return ret; |
| @@ -2330,7 +2330,7 @@ static int | |||
| 2330 | nv50_head_create(struct drm_device *dev, int index) | 2330 | nv50_head_create(struct drm_device *dev, int index) |
| 2331 | { | 2331 | { |
| 2332 | struct nouveau_drm *drm = nouveau_drm(dev); | 2332 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 2333 | struct nvif_device *device = &drm->device; | 2333 | struct nvif_device *device = &drm->client.device; |
| 2334 | struct nv50_disp *disp = nv50_disp(dev); | 2334 | struct nv50_disp *disp = nv50_disp(dev); |
| 2335 | struct nv50_head *head; | 2335 | struct nv50_head *head; |
| 2336 | struct nv50_base *base; | 2336 | struct nv50_base *base; |
| @@ -2364,7 +2364,7 @@ nv50_head_create(struct drm_device *dev, int index) | |||
| 2364 | drm_crtc_helper_add(crtc, &nv50_head_help); | 2364 | drm_crtc_helper_add(crtc, &nv50_head_help); |
| 2365 | drm_mode_crtc_set_gamma_size(crtc, 256); | 2365 | drm_mode_crtc_set_gamma_size(crtc, 256); |
| 2366 | 2366 | ||
| 2367 | ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM, | 2367 | ret = nouveau_bo_new(&drm->client, 8192, 0x100, TTM_PL_FLAG_VRAM, |
| 2368 | 0, 0x0000, NULL, NULL, &head->base.lut.nvbo); | 2368 | 0, 0x0000, NULL, NULL, &head->base.lut.nvbo); |
| 2369 | if (!ret) { | 2369 | if (!ret) { |
| 2370 | ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM, true); | 2370 | ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM, true); |
| @@ -2603,7 +2603,7 @@ static int | |||
| 2603 | nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe) | 2603 | nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe) |
| 2604 | { | 2604 | { |
| 2605 | struct nouveau_drm *drm = nouveau_drm(connector->dev); | 2605 | struct nouveau_drm *drm = nouveau_drm(connector->dev); |
| 2606 | struct nvkm_i2c *i2c = nvxx_i2c(&drm->device); | 2606 | struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device); |
| 2607 | struct nvkm_i2c_bus *bus; | 2607 | struct nvkm_i2c_bus *bus; |
| 2608 | struct nouveau_encoder *nv_encoder; | 2608 | struct nouveau_encoder *nv_encoder; |
| 2609 | struct drm_encoder *encoder; | 2609 | struct drm_encoder *encoder; |
| @@ -3559,7 +3559,7 @@ nv50_sor_enable(struct drm_encoder *encoder) | |||
| 3559 | nv50_audio_enable(encoder, mode); | 3559 | nv50_audio_enable(encoder, mode); |
| 3560 | break; | 3560 | break; |
| 3561 | default: | 3561 | default: |
| 3562 | BUG_ON(1); | 3562 | BUG(); |
| 3563 | break; | 3563 | break; |
| 3564 | } | 3564 | } |
| 3565 | 3565 | ||
| @@ -3593,7 +3593,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe) | |||
| 3593 | { | 3593 | { |
| 3594 | struct nouveau_connector *nv_connector = nouveau_connector(connector); | 3594 | struct nouveau_connector *nv_connector = nouveau_connector(connector); |
| 3595 | struct nouveau_drm *drm = nouveau_drm(connector->dev); | 3595 | struct nouveau_drm *drm = nouveau_drm(connector->dev); |
| 3596 | struct nvkm_i2c *i2c = nvxx_i2c(&drm->device); | 3596 | struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device); |
| 3597 | struct nouveau_encoder *nv_encoder; | 3597 | struct nouveau_encoder *nv_encoder; |
| 3598 | struct drm_encoder *encoder; | 3598 | struct drm_encoder *encoder; |
| 3599 | int type, ret; | 3599 | int type, ret; |
| @@ -3732,7 +3732,7 @@ nv50_pior_enable(struct drm_encoder *encoder) | |||
| 3732 | proto = 0x0; | 3732 | proto = 0x0; |
| 3733 | break; | 3733 | break; |
| 3734 | default: | 3734 | default: |
| 3735 | BUG_ON(1); | 3735 | BUG(); |
| 3736 | break; | 3736 | break; |
| 3737 | } | 3737 | } |
| 3738 | 3738 | ||
| @@ -3778,7 +3778,7 @@ static int | |||
| 3778 | nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe) | 3778 | nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe) |
| 3779 | { | 3779 | { |
| 3780 | struct nouveau_drm *drm = nouveau_drm(connector->dev); | 3780 | struct nouveau_drm *drm = nouveau_drm(connector->dev); |
| 3781 | struct nvkm_i2c *i2c = nvxx_i2c(&drm->device); | 3781 | struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device); |
| 3782 | struct nvkm_i2c_bus *bus = NULL; | 3782 | struct nvkm_i2c_bus *bus = NULL; |
| 3783 | struct nvkm_i2c_aux *aux = NULL; | 3783 | struct nvkm_i2c_aux *aux = NULL; |
| 3784 | struct i2c_adapter *ddc; | 3784 | struct i2c_adapter *ddc; |
| @@ -3851,7 +3851,7 @@ nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 interlock) | |||
| 3851 | evo_data(push, 0x00000000); | 3851 | evo_data(push, 0x00000000); |
| 3852 | nouveau_bo_wr32(disp->sync, 0, 0x00000000); | 3852 | nouveau_bo_wr32(disp->sync, 0, 0x00000000); |
| 3853 | evo_kick(push, core); | 3853 | evo_kick(push, core); |
| 3854 | if (nvif_msec(&drm->device, 2000ULL, | 3854 | if (nvif_msec(&drm->client.device, 2000ULL, |
| 3855 | if (nouveau_bo_rd32(disp->sync, 0)) | 3855 | if (nouveau_bo_rd32(disp->sync, 0)) |
| 3856 | break; | 3856 | break; |
| 3857 | usleep_range(1, 2); | 3857 | usleep_range(1, 2); |
| @@ -3986,6 +3986,11 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) | |||
| 3986 | } | 3986 | } |
| 3987 | } | 3987 | } |
| 3988 | 3988 | ||
| 3989 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | ||
| 3990 | if (crtc->state->event) | ||
| 3991 | drm_crtc_vblank_get(crtc); | ||
| 3992 | } | ||
| 3993 | |||
| 3989 | /* Update plane(s). */ | 3994 | /* Update plane(s). */ |
| 3990 | for_each_plane_in_state(state, plane, plane_state, i) { | 3995 | for_each_plane_in_state(state, plane, plane_state, i) { |
| 3991 | struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state); | 3996 | struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state); |
| @@ -4035,6 +4040,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) | |||
| 4035 | drm_crtc_send_vblank_event(crtc, crtc->state->event); | 4040 | drm_crtc_send_vblank_event(crtc, crtc->state->event); |
| 4036 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); | 4041 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); |
| 4037 | crtc->state->event = NULL; | 4042 | crtc->state->event = NULL; |
| 4043 | drm_crtc_vblank_put(crtc); | ||
| 4038 | } | 4044 | } |
| 4039 | } | 4045 | } |
| 4040 | 4046 | ||
| @@ -4363,7 +4369,7 @@ module_param_named(atomic, nouveau_atomic, int, 0400); | |||
| 4363 | int | 4369 | int |
| 4364 | nv50_display_create(struct drm_device *dev) | 4370 | nv50_display_create(struct drm_device *dev) |
| 4365 | { | 4371 | { |
| 4366 | struct nvif_device *device = &nouveau_drm(dev)->device; | 4372 | struct nvif_device *device = &nouveau_drm(dev)->client.device; |
| 4367 | struct nouveau_drm *drm = nouveau_drm(dev); | 4373 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 4368 | struct dcb_table *dcb = &drm->vbios.dcb; | 4374 | struct dcb_table *dcb = &drm->vbios.dcb; |
| 4369 | struct drm_connector *connector, *tmp; | 4375 | struct drm_connector *connector, *tmp; |
| @@ -4387,7 +4393,7 @@ nv50_display_create(struct drm_device *dev) | |||
| 4387 | dev->driver->driver_features |= DRIVER_ATOMIC; | 4393 | dev->driver->driver_features |= DRIVER_ATOMIC; |
| 4388 | 4394 | ||
| 4389 | /* small shared memory area we use for notifiers and semaphores */ | 4395 | /* small shared memory area we use for notifiers and semaphores */ |
| 4390 | ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, | 4396 | ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM, |
| 4391 | 0, 0x0000, NULL, NULL, &disp->sync); | 4397 | 0, 0x0000, NULL, NULL, &disp->sync); |
| 4392 | if (!ret) { | 4398 | if (!ret) { |
| 4393 | ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true); | 4399 | ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true); |
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c index f68c7054fd53..a369d978e267 100644 --- a/drivers/gpu/drm/nouveau/nv50_fence.c +++ b/drivers/gpu/drm/nouveau/nv50_fence.c | |||
| @@ -37,9 +37,9 @@ nv50_fence_context_new(struct nouveau_channel *chan) | |||
| 37 | { | 37 | { |
| 38 | struct nv10_fence_priv *priv = chan->drm->fence; | 38 | struct nv10_fence_priv *priv = chan->drm->fence; |
| 39 | struct nv10_fence_chan *fctx; | 39 | struct nv10_fence_chan *fctx; |
| 40 | struct ttm_mem_reg *mem = &priv->bo->bo.mem; | 40 | struct ttm_mem_reg *reg = &priv->bo->bo.mem; |
| 41 | u32 start = mem->start * PAGE_SIZE; | 41 | u32 start = reg->start * PAGE_SIZE; |
| 42 | u32 limit = start + mem->size - 1; | 42 | u32 limit = start + reg->size - 1; |
| 43 | int ret; | 43 | int ret; |
| 44 | 44 | ||
| 45 | fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); | 45 | fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); |
| @@ -82,7 +82,7 @@ nv50_fence_create(struct nouveau_drm *drm) | |||
| 82 | priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); | 82 | priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); |
| 83 | spin_lock_init(&priv->lock); | 83 | spin_lock_init(&priv->lock); |
| 84 | 84 | ||
| 85 | ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, | 85 | ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM, |
| 86 | 0, 0x0000, NULL, NULL, &priv->bo); | 86 | 0, 0x0000, NULL, NULL, &priv->bo); |
| 87 | if (!ret) { | 87 | if (!ret) { |
| 88 | ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false); | 88 | ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false); |
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c index 52b87ae83e7b..bd7a8a1e4ad9 100644 --- a/drivers/gpu/drm/nouveau/nv84_fence.c +++ b/drivers/gpu/drm/nouveau/nv84_fence.c | |||
| @@ -107,8 +107,10 @@ nv84_fence_context_del(struct nouveau_channel *chan) | |||
| 107 | struct nv84_fence_chan *fctx = chan->fence; | 107 | struct nv84_fence_chan *fctx = chan->fence; |
| 108 | 108 | ||
| 109 | nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence); | 109 | nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence); |
| 110 | mutex_lock(&priv->mutex); | ||
| 110 | nouveau_bo_vma_del(priv->bo, &fctx->vma_gart); | 111 | nouveau_bo_vma_del(priv->bo, &fctx->vma_gart); |
| 111 | nouveau_bo_vma_del(priv->bo, &fctx->vma); | 112 | nouveau_bo_vma_del(priv->bo, &fctx->vma); |
| 113 | mutex_unlock(&priv->mutex); | ||
| 112 | nouveau_fence_context_del(&fctx->base); | 114 | nouveau_fence_context_del(&fctx->base); |
| 113 | chan->fence = NULL; | 115 | chan->fence = NULL; |
| 114 | nouveau_fence_context_free(&fctx->base); | 116 | nouveau_fence_context_free(&fctx->base); |
| @@ -134,11 +136,13 @@ nv84_fence_context_new(struct nouveau_channel *chan) | |||
| 134 | fctx->base.sync32 = nv84_fence_sync32; | 136 | fctx->base.sync32 = nv84_fence_sync32; |
| 135 | fctx->base.sequence = nv84_fence_read(chan); | 137 | fctx->base.sequence = nv84_fence_read(chan); |
| 136 | 138 | ||
| 139 | mutex_lock(&priv->mutex); | ||
| 137 | ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma); | 140 | ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma); |
| 138 | if (ret == 0) { | 141 | if (ret == 0) { |
| 139 | ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm, | 142 | ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm, |
| 140 | &fctx->vma_gart); | 143 | &fctx->vma_gart); |
| 141 | } | 144 | } |
| 145 | mutex_unlock(&priv->mutex); | ||
| 142 | 146 | ||
| 143 | if (ret) | 147 | if (ret) |
| 144 | nv84_fence_context_del(chan); | 148 | nv84_fence_context_del(chan); |
| @@ -193,7 +197,7 @@ nv84_fence_destroy(struct nouveau_drm *drm) | |||
| 193 | int | 197 | int |
| 194 | nv84_fence_create(struct nouveau_drm *drm) | 198 | nv84_fence_create(struct nouveau_drm *drm) |
| 195 | { | 199 | { |
| 196 | struct nvkm_fifo *fifo = nvxx_fifo(&drm->device); | 200 | struct nvkm_fifo *fifo = nvxx_fifo(&drm->client.device); |
| 197 | struct nv84_fence_priv *priv; | 201 | struct nv84_fence_priv *priv; |
| 198 | u32 domain; | 202 | u32 domain; |
| 199 | int ret; | 203 | int ret; |
| @@ -212,15 +216,17 @@ nv84_fence_create(struct nouveau_drm *drm) | |||
| 212 | priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); | 216 | priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); |
| 213 | priv->base.uevent = true; | 217 | priv->base.uevent = true; |
| 214 | 218 | ||
| 219 | mutex_init(&priv->mutex); | ||
| 220 | |||
| 215 | /* Use VRAM if there is any ; otherwise fallback to system memory */ | 221 | /* Use VRAM if there is any ; otherwise fallback to system memory */ |
| 216 | domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM : | 222 | domain = drm->client.device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM : |
| 217 | /* | 223 | /* |
| 218 | * fences created in sysmem must be non-cached or we | 224 | * fences created in sysmem must be non-cached or we |
| 219 | * will lose CPU/GPU coherency! | 225 | * will lose CPU/GPU coherency! |
| 220 | */ | 226 | */ |
| 221 | TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED; | 227 | TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED; |
| 222 | ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0, domain, 0, | 228 | ret = nouveau_bo_new(&drm->client, 16 * priv->base.contexts, 0, |
| 223 | 0, NULL, NULL, &priv->bo); | 229 | domain, 0, 0, NULL, NULL, &priv->bo); |
| 224 | if (ret == 0) { | 230 | if (ret == 0) { |
| 225 | ret = nouveau_bo_pin(priv->bo, domain, false); | 231 | ret = nouveau_bo_pin(priv->bo, domain, false); |
| 226 | if (ret == 0) { | 232 | if (ret == 0) { |
| @@ -233,7 +239,7 @@ nv84_fence_create(struct nouveau_drm *drm) | |||
| 233 | } | 239 | } |
| 234 | 240 | ||
| 235 | if (ret == 0) | 241 | if (ret == 0) |
| 236 | ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0, | 242 | ret = nouveau_bo_new(&drm->client, 16 * priv->base.contexts, 0, |
| 237 | TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED, 0, | 243 | TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED, 0, |
| 238 | 0, NULL, NULL, &priv->bo_gart); | 244 | 0, NULL, NULL, &priv->bo_gart); |
| 239 | if (ret == 0) { | 245 | if (ret == 0) { |
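nv84_fence shares one fence buffer between all channels, and each channel maps it into its own VM on context creation and unmaps it on teardown. The hunks above serialize those nouveau_bo_vma_add()/nouveau_bo_vma_del() calls behind the new priv->mutex (initialized in nv84_fence_create), so concurrent channel creation and destruction no longer race on the shared VMA lists. A minimal userspace sketch of that rule, with pthreads standing in for the kernel mutex and names that are illustrative only:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t vma_lock = PTHREAD_MUTEX_INITIALIZER;
    static int nr_mappings;     /* stands in for the BO's per-channel VMAs */

    static void context_new(int chid)
    {
        pthread_mutex_lock(&vma_lock);
        nr_mappings++;          /* nouveau_bo_vma_add() equivalent */
        pthread_mutex_unlock(&vma_lock);
        printf("chan %d mapped\n", chid);
    }

    static void context_del(int chid)
    {
        pthread_mutex_lock(&vma_lock);
        nr_mappings--;          /* nouveau_bo_vma_del() equivalent */
        pthread_mutex_unlock(&vma_lock);
        printf("chan %d unmapped\n", chid);
    }

    int main(void)
    {
        context_new(0);
        context_new(1);
        context_del(0);
        context_del(1);
        return nr_mappings;     /* 0 when add/del stayed balanced */
    }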
diff --git a/drivers/gpu/drm/nouveau/nvif/Kbuild b/drivers/gpu/drm/nouveau/nvif/Kbuild index ff8ed3a04d06..067b5e9f5ec1 100644 --- a/drivers/gpu/drm/nouveau/nvif/Kbuild +++ b/drivers/gpu/drm/nouveau/nvif/Kbuild | |||
| @@ -1,4 +1,5 @@ | |||
| 1 | nvif-y := nvif/object.o | 1 | nvif-y := nvif/object.o |
| 2 | nvif-y += nvif/client.o | 2 | nvif-y += nvif/client.o |
| 3 | nvif-y += nvif/device.o | 3 | nvif-y += nvif/device.o |
| 4 | nvif-y += nvif/driver.o | ||
| 4 | nvif-y += nvif/notify.o | 5 | nvif-y += nvif/notify.o |
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c index 29c20dfd894d..12db54965c20 100644 --- a/drivers/gpu/drm/nouveau/nvif/client.c +++ b/drivers/gpu/drm/nouveau/nvif/client.c | |||
| @@ -26,6 +26,9 @@ | |||
| 26 | #include <nvif/driver.h> | 26 | #include <nvif/driver.h> |
| 27 | #include <nvif/ioctl.h> | 27 | #include <nvif/ioctl.h> |
| 28 | 28 | ||
| 29 | #include <nvif/class.h> | ||
| 30 | #include <nvif/if0000.h> | ||
| 31 | |||
| 29 | int | 32 | int |
| 30 | nvif_client_ioctl(struct nvif_client *client, void *data, u32 size) | 33 | nvif_client_ioctl(struct nvif_client *client, void *data, u32 size) |
| 31 | { | 34 | { |
| @@ -47,37 +50,29 @@ nvif_client_resume(struct nvif_client *client) | |||
| 47 | void | 50 | void |
| 48 | nvif_client_fini(struct nvif_client *client) | 51 | nvif_client_fini(struct nvif_client *client) |
| 49 | { | 52 | { |
| 53 | nvif_object_fini(&client->object); | ||
| 50 | if (client->driver) { | 54 | if (client->driver) { |
| 51 | client->driver->fini(client->object.priv); | 55 | if (client->driver->fini) |
| 56 | client->driver->fini(client->object.priv); | ||
| 52 | client->driver = NULL; | 57 | client->driver = NULL; |
| 53 | client->object.client = NULL; | ||
| 54 | nvif_object_fini(&client->object); | ||
| 55 | } | 58 | } |
| 56 | } | 59 | } |
| 57 | 60 | ||
| 58 | static const struct nvif_driver * | ||
| 59 | nvif_drivers[] = { | ||
| 60 | #ifdef __KERNEL__ | ||
| 61 | &nvif_driver_nvkm, | ||
| 62 | #else | ||
| 63 | &nvif_driver_drm, | ||
| 64 | &nvif_driver_lib, | ||
| 65 | &nvif_driver_null, | ||
| 66 | #endif | ||
| 67 | NULL | ||
| 68 | }; | ||
| 69 | |||
| 70 | int | 61 | int |
| 71 | nvif_client_init(const char *driver, const char *name, u64 device, | 62 | nvif_client_init(struct nvif_client *parent, const char *name, u64 device, |
| 72 | const char *cfg, const char *dbg, struct nvif_client *client) | 63 | struct nvif_client *client) |
| 73 | { | 64 | { |
| 65 | struct nvif_client_v0 args = { .device = device }; | ||
| 74 | struct { | 66 | struct { |
| 75 | struct nvif_ioctl_v0 ioctl; | 67 | struct nvif_ioctl_v0 ioctl; |
| 76 | struct nvif_ioctl_nop_v0 nop; | 68 | struct nvif_ioctl_nop_v0 nop; |
| 77 | } args = {}; | 69 | } nop = {}; |
| 78 | int ret, i; | 70 | int ret; |
| 79 | 71 | ||
| 80 | ret = nvif_object_init(NULL, 0, 0, NULL, 0, &client->object); | 72 | strncpy(args.name, name, sizeof(args.name)); |
| 73 | ret = nvif_object_init(parent != client ? &parent->object : NULL, | ||
| 74 | 0, NVIF_CLASS_CLIENT, &args, sizeof(args), | ||
| 75 | &client->object); | ||
| 81 | if (ret) | 76 | if (ret) |
| 82 | return ret; | 77 | return ret; |
| 83 | 78 | ||
| @@ -85,19 +80,11 @@ nvif_client_init(const char *driver, const char *name, u64 device, | |||
| 85 | client->object.handle = ~0; | 80 | client->object.handle = ~0; |
| 86 | client->route = NVIF_IOCTL_V0_ROUTE_NVIF; | 81 | client->route = NVIF_IOCTL_V0_ROUTE_NVIF; |
| 87 | client->super = true; | 82 | client->super = true; |
| 88 | 83 | client->driver = parent->driver; | |
| 89 | for (i = 0, ret = -EINVAL; (client->driver = nvif_drivers[i]); i++) { | ||
| 90 | if (!driver || !strcmp(client->driver->name, driver)) { | ||
| 91 | ret = client->driver->init(name, device, cfg, dbg, | ||
| 92 | &client->object.priv); | ||
| 93 | if (!ret || driver) | ||
| 94 | break; | ||
| 95 | } | ||
| 96 | } | ||
| 97 | 84 | ||
| 98 | if (ret == 0) { | 85 | if (ret == 0) { |
| 99 | ret = nvif_client_ioctl(client, &args, sizeof(args)); | 86 | ret = nvif_client_ioctl(client, &nop, sizeof(nop)); |
| 100 | client->version = args.nop.version; | 87 | client->version = nop.nop.version; |
| 101 | } | 88 | } |
| 102 | 89 | ||
| 103 | if (ret) | 90 | if (ret) |
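nvif_client_init() no longer walks a driver table itself: it inherits parent->driver, packs a versioned nvif_client_v0 argument structure (the receiving side, nvkm_uclient_new further down, force-terminates the copied name), creates the client as an ordinary object, and then issues a NOP ioctl purely to read back the interface version the other side speaks. A sketch of that construct-then-handshake shape, with stand-in types rather than the real nvif structures:

    #include <stdio.h>
    #include <string.h>

    struct client_args { char name[32]; unsigned long long device; };
    struct nop_args { unsigned version; };

    static int object_init(const struct client_args *args)
    {
        printf("created client \"%s\" for device %llu\n",
               args->name, args->device);
        return 0;
    }

    static int client_ioctl(struct nop_args *nop)
    {
        nop->version = 0;   /* the "server" fills in the version it speaks */
        return 0;
    }

    int main(void)
    {
        struct client_args args = { .device = 0 };
        struct nop_args nop = { 0 };
        int ret;

        strncpy(args.name, "drm", sizeof(args.name) - 1);
        ret = object_init(&args);
        if (ret == 0) {
            ret = client_ioctl(&nop);
            printf("interface version %u\n", nop.version);
        }
        return ret;
    }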
diff --git a/drivers/gpu/drm/nouveau/nvif/driver.c b/drivers/gpu/drm/nouveau/nvif/driver.c new file mode 100644 index 000000000000..701330956e33 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/driver.c | |||
| @@ -0,0 +1,58 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2016 Red Hat Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Ben Skeggs | ||
| 23 | */ | ||
| 24 | #include <nvif/driver.h> | ||
| 25 | #include <nvif/client.h> | ||
| 26 | |||
| 27 | static const struct nvif_driver * | ||
| 28 | nvif_driver[] = { | ||
| 29 | #ifdef __KERNEL__ | ||
| 30 | &nvif_driver_nvkm, | ||
| 31 | #else | ||
| 32 | &nvif_driver_drm, | ||
| 33 | &nvif_driver_lib, | ||
| 34 | &nvif_driver_null, | ||
| 35 | #endif | ||
| 36 | NULL | ||
| 37 | }; | ||
| 38 | |||
| 39 | int | ||
| 40 | nvif_driver_init(const char *drv, const char *cfg, const char *dbg, | ||
| 41 | const char *name, u64 device, struct nvif_client *client) | ||
| 42 | { | ||
| 43 | int ret = -EINVAL, i; | ||
| 44 | |||
| 45 | for (i = 0; (client->driver = nvif_driver[i]); i++) { | ||
| 46 | if (!drv || !strcmp(client->driver->name, drv)) { | ||
| 47 | ret = client->driver->init(name, device, cfg, dbg, | ||
| 48 | &client->object.priv); | ||
| 49 | if (ret == 0) | ||
| 50 | break; | ||
| 51 | client->driver->fini(client->object.priv); | ||
| 52 | } | ||
| 53 | } | ||
| 54 | |||
| 55 | if (ret == 0) | ||
| 56 | ret = nvif_client_init(client, name, device, client); | ||
| 57 | return ret; | ||
| 58 | } | ||
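The new nvif/driver.c hoists the backend-probe loop that nvif_client_init() used to contain: nvif_driver_init() walks a NULL-terminated table of drivers, optionally filtered by name, and the first init() that succeeds wins, after which the client object is created through the normal path. A compilable stand-in for that probe pattern (names and table contents are illustrative only):

    #include <stdio.h>
    #include <string.h>

    struct driver {
        const char *name;
        int (*init)(void);
    };

    static int nvkm_init(void) { return 0; }    /* pretend this succeeds */

    static const struct driver nvkm_driver = { "nvkm", nvkm_init };
    static const struct driver *drivers[] = { &nvkm_driver, NULL };

    static const struct driver *probe(const char *want)
    {
        const struct driver *drv;
        int i;

        for (i = 0; (drv = drivers[i]); i++) {
            if (want && strcmp(drv->name, want))
                continue;           /* name filter, like the drv arg */
            if (drv->init() == 0)
                return drv;         /* first successful init wins */
        }
        return NULL;
    }

    int main(void)
    {
        const struct driver *drv = probe(NULL);
        printf("probed: %s\n", drv ? drv->name : "(none)");
        return drv ? 0 : 1;
    }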
diff --git a/drivers/gpu/drm/nouveau/nvkm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/Kbuild index 2832147b676c..e664378f6eda 100644 --- a/drivers/gpu/drm/nouveau/nvkm/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/Kbuild | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | include $(src)/nvkm/core/Kbuild | 1 | include $(src)/nvkm/core/Kbuild |
| 2 | include $(src)/nvkm/falcon/Kbuild | ||
| 2 | include $(src)/nvkm/subdev/Kbuild | 3 | include $(src)/nvkm/subdev/Kbuild |
| 3 | include $(src)/nvkm/engine/Kbuild | 4 | include $(src)/nvkm/engine/Kbuild |
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c index e1943910858e..0d3a896892b4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/client.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c | |||
| @@ -31,6 +31,43 @@ | |||
| 31 | #include <nvif/if0000.h> | 31 | #include <nvif/if0000.h> |
| 32 | #include <nvif/unpack.h> | 32 | #include <nvif/unpack.h> |
| 33 | 33 | ||
| 34 | static int | ||
| 35 | nvkm_uclient_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, | ||
| 36 | struct nvkm_object **pobject) | ||
| 37 | { | ||
| 38 | union { | ||
| 39 | struct nvif_client_v0 v0; | ||
| 40 | } *args = argv; | ||
| 41 | struct nvkm_client *client; | ||
| 42 | int ret = -ENOSYS; | ||
| 43 | |||
| 44 | if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))){ | ||
| 45 | args->v0.name[sizeof(args->v0.name) - 1] = 0; | ||
| 46 | ret = nvkm_client_new(args->v0.name, args->v0.device, NULL, | ||
| 47 | NULL, oclass->client->ntfy, &client); | ||
| 48 | if (ret) | ||
| 49 | return ret; | ||
| 50 | } else | ||
| 51 | return ret; | ||
| 52 | |||
| 53 | client->object.client = oclass->client; | ||
| 54 | client->object.handle = oclass->handle; | ||
| 55 | client->object.route = oclass->route; | ||
| 56 | client->object.token = oclass->token; | ||
| 57 | client->object.object = oclass->object; | ||
| 58 | client->debug = oclass->client->debug; | ||
| 59 | *pobject = &client->object; | ||
| 60 | return 0; | ||
| 61 | } | ||
| 62 | |||
| 63 | const struct nvkm_sclass | ||
| 64 | nvkm_uclient_sclass = { | ||
| 65 | .oclass = NVIF_CLASS_CLIENT, | ||
| 66 | .minver = 0, | ||
| 67 | .maxver = 0, | ||
| 68 | .ctor = nvkm_uclient_new, | ||
| 69 | }; | ||
| 70 | |||
| 34 | struct nvkm_client_notify { | 71 | struct nvkm_client_notify { |
| 35 | struct nvkm_client *client; | 72 | struct nvkm_client *client; |
| 36 | struct nvkm_notify n; | 73 | struct nvkm_notify n; |
| @@ -138,17 +175,30 @@ nvkm_client_notify_new(struct nvkm_object *object, | |||
| 138 | return ret; | 175 | return ret; |
| 139 | } | 176 | } |
| 140 | 177 | ||
| 178 | static const struct nvkm_object_func nvkm_client; | ||
| 179 | struct nvkm_client * | ||
| 180 | nvkm_client_search(struct nvkm_client *client, u64 handle) | ||
| 181 | { | ||
| 182 | struct nvkm_object *object; | ||
| 183 | |||
| 184 | object = nvkm_object_search(client, handle, &nvkm_client); | ||
| 185 | if (IS_ERR(object)) | ||
| 186 | return (void *)object; | ||
| 187 | |||
| 188 | return nvkm_client(object); | ||
| 189 | } | ||
| 190 | |||
| 141 | static int | 191 | static int |
| 142 | nvkm_client_mthd_devlist(struct nvkm_object *object, void *data, u32 size) | 192 | nvkm_client_mthd_devlist(struct nvkm_client *client, void *data, u32 size) |
| 143 | { | 193 | { |
| 144 | union { | 194 | union { |
| 145 | struct nv_client_devlist_v0 v0; | 195 | struct nvif_client_devlist_v0 v0; |
| 146 | } *args = data; | 196 | } *args = data; |
| 147 | int ret = -ENOSYS; | 197 | int ret = -ENOSYS; |
| 148 | 198 | ||
| 149 | nvif_ioctl(object, "client devlist size %d\n", size); | 199 | nvif_ioctl(&client->object, "client devlist size %d\n", size); |
| 150 | if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) { | 200 | if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) { |
| 151 | nvif_ioctl(object, "client devlist vers %d count %d\n", | 201 | nvif_ioctl(&client->object, "client devlist vers %d count %d\n", |
| 152 | args->v0.version, args->v0.count); | 202 | args->v0.version, args->v0.count); |
| 153 | if (size == sizeof(args->v0.device[0]) * args->v0.count) { | 203 | if (size == sizeof(args->v0.device[0]) * args->v0.count) { |
| 154 | ret = nvkm_device_list(args->v0.device, args->v0.count); | 204 | ret = nvkm_device_list(args->v0.device, args->v0.count); |
| @@ -167,9 +217,10 @@ nvkm_client_mthd_devlist(struct nvkm_object *object, void *data, u32 size) | |||
| 167 | static int | 217 | static int |
| 168 | nvkm_client_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size) | 218 | nvkm_client_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size) |
| 169 | { | 219 | { |
| 220 | struct nvkm_client *client = nvkm_client(object); | ||
| 170 | switch (mthd) { | 221 | switch (mthd) { |
| 171 | case NV_CLIENT_DEVLIST: | 222 | case NVIF_CLIENT_V0_DEVLIST: |
| 172 | return nvkm_client_mthd_devlist(object, data, size); | 223 | return nvkm_client_mthd_devlist(client, data, size); |
| 173 | default: | 224 | default: |
| 174 | break; | 225 | break; |
| 175 | } | 226 | } |
| @@ -190,7 +241,8 @@ nvkm_client_child_get(struct nvkm_object *object, int index, | |||
| 190 | const struct nvkm_sclass *sclass; | 241 | const struct nvkm_sclass *sclass; |
| 191 | 242 | ||
| 192 | switch (index) { | 243 | switch (index) { |
| 193 | case 0: sclass = &nvkm_udevice_sclass; break; | 244 | case 0: sclass = &nvkm_uclient_sclass; break; |
| 245 | case 1: sclass = &nvkm_udevice_sclass; break; | ||
| 194 | default: | 246 | default: |
| 195 | return -EINVAL; | 247 | return -EINVAL; |
| 196 | } | 248 | } |
| @@ -200,110 +252,54 @@ nvkm_client_child_get(struct nvkm_object *object, int index, | |||
| 200 | return 0; | 252 | return 0; |
| 201 | } | 253 | } |
| 202 | 254 | ||
| 203 | static const struct nvkm_object_func | 255 | static int |
| 204 | nvkm_client_object_func = { | 256 | nvkm_client_fini(struct nvkm_object *object, bool suspend) |
| 205 | .mthd = nvkm_client_mthd, | ||
| 206 | .sclass = nvkm_client_child_get, | ||
| 207 | }; | ||
| 208 | |||
| 209 | void | ||
| 210 | nvkm_client_remove(struct nvkm_client *client, struct nvkm_object *object) | ||
| 211 | { | ||
| 212 | if (!RB_EMPTY_NODE(&object->node)) | ||
| 213 | rb_erase(&object->node, &client->objroot); | ||
| 214 | } | ||
| 215 | |||
| 216 | bool | ||
| 217 | nvkm_client_insert(struct nvkm_client *client, struct nvkm_object *object) | ||
| 218 | { | ||
| 219 | struct rb_node **ptr = &client->objroot.rb_node; | ||
| 220 | struct rb_node *parent = NULL; | ||
| 221 | |||
| 222 | while (*ptr) { | ||
| 223 | struct nvkm_object *this = | ||
| 224 | container_of(*ptr, typeof(*this), node); | ||
| 225 | parent = *ptr; | ||
| 226 | if (object->object < this->object) | ||
| 227 | ptr = &parent->rb_left; | ||
| 228 | else | ||
| 229 | if (object->object > this->object) | ||
| 230 | ptr = &parent->rb_right; | ||
| 231 | else | ||
| 232 | return false; | ||
| 233 | } | ||
| 234 | |||
| 235 | rb_link_node(&object->node, parent, ptr); | ||
| 236 | rb_insert_color(&object->node, &client->objroot); | ||
| 237 | return true; | ||
| 238 | } | ||
| 239 | |||
| 240 | struct nvkm_object * | ||
| 241 | nvkm_client_search(struct nvkm_client *client, u64 handle) | ||
| 242 | { | ||
| 243 | struct rb_node *node = client->objroot.rb_node; | ||
| 244 | while (node) { | ||
| 245 | struct nvkm_object *object = | ||
| 246 | container_of(node, typeof(*object), node); | ||
| 247 | if (handle < object->object) | ||
| 248 | node = node->rb_left; | ||
| 249 | else | ||
| 250 | if (handle > object->object) | ||
| 251 | node = node->rb_right; | ||
| 252 | else | ||
| 253 | return object; | ||
| 254 | } | ||
| 255 | return NULL; | ||
| 256 | } | ||
| 257 | |||
| 258 | int | ||
| 259 | nvkm_client_fini(struct nvkm_client *client, bool suspend) | ||
| 260 | { | 257 | { |
| 261 | struct nvkm_object *object = &client->object; | 258 | struct nvkm_client *client = nvkm_client(object); |
| 262 | const char *name[2] = { "fini", "suspend" }; | 259 | const char *name[2] = { "fini", "suspend" }; |
| 263 | int i; | 260 | int i; |
| 264 | nvif_debug(object, "%s notify\n", name[suspend]); | 261 | nvif_debug(object, "%s notify\n", name[suspend]); |
| 265 | for (i = 0; i < ARRAY_SIZE(client->notify); i++) | 262 | for (i = 0; i < ARRAY_SIZE(client->notify); i++) |
| 266 | nvkm_client_notify_put(client, i); | 263 | nvkm_client_notify_put(client, i); |
| 267 | return nvkm_object_fini(&client->object, suspend); | 264 | return 0; |
| 268 | } | ||
| 269 | |||
| 270 | int | ||
| 271 | nvkm_client_init(struct nvkm_client *client) | ||
| 272 | { | ||
| 273 | return nvkm_object_init(&client->object); | ||
| 274 | } | 265 | } |
| 275 | 266 | ||
| 276 | void | 267 | static void * |
| 277 | nvkm_client_del(struct nvkm_client **pclient) | 268 | nvkm_client_dtor(struct nvkm_object *object) |
| 278 | { | 269 | { |
| 279 | struct nvkm_client *client = *pclient; | 270 | struct nvkm_client *client = nvkm_client(object); |
| 280 | int i; | 271 | int i; |
| 281 | if (client) { | 272 | for (i = 0; i < ARRAY_SIZE(client->notify); i++) |
| 282 | nvkm_client_fini(client, false); | 273 | nvkm_client_notify_del(client, i); |
| 283 | for (i = 0; i < ARRAY_SIZE(client->notify); i++) | 274 | return client; |
| 284 | nvkm_client_notify_del(client, i); | ||
| 285 | nvkm_object_dtor(&client->object); | ||
| 286 | kfree(*pclient); | ||
| 287 | *pclient = NULL; | ||
| 288 | } | ||
| 289 | } | 275 | } |
| 290 | 276 | ||
| 277 | static const struct nvkm_object_func | ||
| 278 | nvkm_client = { | ||
| 279 | .dtor = nvkm_client_dtor, | ||
| 280 | .fini = nvkm_client_fini, | ||
| 281 | .mthd = nvkm_client_mthd, | ||
| 282 | .sclass = nvkm_client_child_get, | ||
| 283 | }; | ||
| 284 | |||
| 291 | int | 285 | int |
| 292 | nvkm_client_new(const char *name, u64 device, const char *cfg, | 286 | nvkm_client_new(const char *name, u64 device, const char *cfg, |
| 293 | const char *dbg, struct nvkm_client **pclient) | 287 | const char *dbg, |
| 288 | int (*ntfy)(const void *, u32, const void *, u32), | ||
| 289 | struct nvkm_client **pclient) | ||
| 294 | { | 290 | { |
| 295 | struct nvkm_oclass oclass = {}; | 291 | struct nvkm_oclass oclass = { .base = nvkm_uclient_sclass }; |
| 296 | struct nvkm_client *client; | 292 | struct nvkm_client *client; |
| 297 | 293 | ||
| 298 | if (!(client = *pclient = kzalloc(sizeof(*client), GFP_KERNEL))) | 294 | if (!(client = *pclient = kzalloc(sizeof(*client), GFP_KERNEL))) |
| 299 | return -ENOMEM; | 295 | return -ENOMEM; |
| 300 | oclass.client = client; | 296 | oclass.client = client; |
| 301 | 297 | ||
| 302 | nvkm_object_ctor(&nvkm_client_object_func, &oclass, &client->object); | 298 | nvkm_object_ctor(&nvkm_client, &oclass, &client->object); |
| 303 | snprintf(client->name, sizeof(client->name), "%s", name); | 299 | snprintf(client->name, sizeof(client->name), "%s", name); |
| 304 | client->device = device; | 300 | client->device = device; |
| 305 | client->debug = nvkm_dbgopt(dbg, "CLIENT"); | 301 | client->debug = nvkm_dbgopt(dbg, "CLIENT"); |
| 306 | client->objroot = RB_ROOT; | 302 | client->objroot = RB_ROOT; |
| 307 | client->dmaroot = RB_ROOT; | 303 | client->ntfy = ntfy; |
| 308 | return 0; | 304 | return 0; |
| 309 | } | 305 | } |
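The core change in client.c above is that a client is now itself a regular nvkm_object with its own class (nvkm_uclient_sclass, exposed as child index 0), so one client can construct another — which is what the nvif_client_init() rework earlier relies on. nvkm_client_search() then finds clients through the generic object lookup and passes the static nvkm_client function table so the result is type-checked before the cast. That func-pointer-as-type-tag idiom, sketched standalone with stand-in types:

    #include <stdio.h>

    struct object_func { const char *kind; };
    struct object { const struct object_func *func; };
    struct client { struct object object; int id; };

    static const struct object_func client_func = { "client" };

    static struct client *to_client(struct object *object)
    {
        if (object->func != &client_func)
            return NULL;    /* wrong type: refuse the downcast */
        return (struct client *)object; /* object is the first member */
    }

    int main(void)
    {
        struct client c = { { &client_func }, 42 };
        struct object other = { NULL };
        struct client *p = to_client(&c.object);

        printf("good cast: id %d\n", p ? p->id : -1);
        printf("bad cast rejected: %s\n",
               to_client(&other) ? "no" : "yes");
        return 0;
    }

Comparing the address of a unique static ops table is a cheap runtime type check; the kernel code uses exactly this comparison inside nvkm_object_search().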
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c index ee8e5831fe37..b6c916954a10 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c | |||
| @@ -27,6 +27,14 @@ | |||
| 27 | 27 | ||
| 28 | #include <subdev/fb.h> | 28 | #include <subdev/fb.h> |
| 29 | 29 | ||
| 30 | bool | ||
| 31 | nvkm_engine_chsw_load(struct nvkm_engine *engine) | ||
| 32 | { | ||
| 33 | if (engine->func->chsw_load) | ||
| 34 | return engine->func->chsw_load(engine); | ||
| 35 | return false; | ||
| 36 | } | ||
| 37 | |||
| 30 | void | 38 | void |
| 31 | nvkm_engine_unref(struct nvkm_engine **pengine) | 39 | nvkm_engine_unref(struct nvkm_engine **pengine) |
| 32 | { | 40 | { |
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c index b0db51847c36..be19bbe56bba 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c | |||
| @@ -29,7 +29,8 @@ | |||
| 29 | #include <nvif/ioctl.h> | 29 | #include <nvif/ioctl.h> |
| 30 | 30 | ||
| 31 | static int | 31 | static int |
| 32 | nvkm_ioctl_nop(struct nvkm_object *object, void *data, u32 size) | 32 | nvkm_ioctl_nop(struct nvkm_client *client, |
| 33 | struct nvkm_object *object, void *data, u32 size) | ||
| 33 | { | 34 | { |
| 34 | union { | 35 | union { |
| 35 | struct nvif_ioctl_nop_v0 v0; | 36 | struct nvif_ioctl_nop_v0 v0; |
| @@ -46,7 +47,8 @@ nvkm_ioctl_nop(struct nvkm_object *object, void *data, u32 size) | |||
| 46 | } | 47 | } |
| 47 | 48 | ||
| 48 | static int | 49 | static int |
| 49 | nvkm_ioctl_sclass(struct nvkm_object *object, void *data, u32 size) | 50 | nvkm_ioctl_sclass(struct nvkm_client *client, |
| 51 | struct nvkm_object *object, void *data, u32 size) | ||
| 50 | { | 52 | { |
| 51 | union { | 53 | union { |
| 52 | struct nvif_ioctl_sclass_v0 v0; | 54 | struct nvif_ioctl_sclass_v0 v0; |
| @@ -78,12 +80,12 @@ nvkm_ioctl_sclass(struct nvkm_object *object, void *data, u32 size) | |||
| 78 | } | 80 | } |
| 79 | 81 | ||
| 80 | static int | 82 | static int |
| 81 | nvkm_ioctl_new(struct nvkm_object *parent, void *data, u32 size) | 83 | nvkm_ioctl_new(struct nvkm_client *client, |
| 84 | struct nvkm_object *parent, void *data, u32 size) | ||
| 82 | { | 85 | { |
| 83 | union { | 86 | union { |
| 84 | struct nvif_ioctl_new_v0 v0; | 87 | struct nvif_ioctl_new_v0 v0; |
| 85 | } *args = data; | 88 | } *args = data; |
| 86 | struct nvkm_client *client = parent->client; | ||
| 87 | struct nvkm_object *object = NULL; | 89 | struct nvkm_object *object = NULL; |
| 88 | struct nvkm_oclass oclass; | 90 | struct nvkm_oclass oclass; |
| 89 | int ret = -ENOSYS, i = 0; | 91 | int ret = -ENOSYS, i = 0; |
| @@ -104,9 +106,11 @@ nvkm_ioctl_new(struct nvkm_object *parent, void *data, u32 size) | |||
| 104 | 106 | ||
| 105 | do { | 107 | do { |
| 106 | memset(&oclass, 0x00, sizeof(oclass)); | 108 | memset(&oclass, 0x00, sizeof(oclass)); |
| 107 | oclass.client = client; | ||
| 108 | oclass.handle = args->v0.handle; | 109 | oclass.handle = args->v0.handle; |
| 110 | oclass.route = args->v0.route; | ||
| 111 | oclass.token = args->v0.token; | ||
| 109 | oclass.object = args->v0.object; | 112 | oclass.object = args->v0.object; |
| 113 | oclass.client = client; | ||
| 110 | oclass.parent = parent; | 114 | oclass.parent = parent; |
| 111 | ret = parent->func->sclass(parent, i++, &oclass); | 115 | ret = parent->func->sclass(parent, i++, &oclass); |
| 112 | if (ret) | 116 | if (ret) |
| @@ -125,10 +129,7 @@ nvkm_ioctl_new(struct nvkm_object *parent, void *data, u32 size) | |||
| 125 | ret = nvkm_object_init(object); | 129 | ret = nvkm_object_init(object); |
| 126 | if (ret == 0) { | 130 | if (ret == 0) { |
| 127 | list_add(&object->head, &parent->tree); | 131 | list_add(&object->head, &parent->tree); |
| 128 | object->route = args->v0.route; | 132 | if (nvkm_object_insert(object)) { |
| 129 | object->token = args->v0.token; | ||
| 130 | object->object = args->v0.object; | ||
| 131 | if (nvkm_client_insert(client, object)) { | ||
| 132 | client->data = object; | 133 | client->data = object; |
| 133 | return 0; | 134 | return 0; |
| 134 | } | 135 | } |
| @@ -142,7 +143,8 @@ nvkm_ioctl_new(struct nvkm_object *parent, void *data, u32 size) | |||
| 142 | } | 143 | } |
| 143 | 144 | ||
| 144 | static int | 145 | static int |
| 145 | nvkm_ioctl_del(struct nvkm_object *object, void *data, u32 size) | 146 | nvkm_ioctl_del(struct nvkm_client *client, |
| 147 | struct nvkm_object *object, void *data, u32 size) | ||
| 146 | { | 148 | { |
| 147 | union { | 149 | union { |
| 148 | struct nvif_ioctl_del none; | 150 | struct nvif_ioctl_del none; |
| @@ -156,11 +158,12 @@ nvkm_ioctl_del(struct nvkm_object *object, void *data, u32 size) | |||
| 156 | nvkm_object_del(&object); | 158 | nvkm_object_del(&object); |
| 157 | } | 159 | } |
| 158 | 160 | ||
| 159 | return ret; | 161 | return ret ? ret : 1; |
| 160 | } | 162 | } |
| 161 | 163 | ||
| 162 | static int | 164 | static int |
| 163 | nvkm_ioctl_mthd(struct nvkm_object *object, void *data, u32 size) | 165 | nvkm_ioctl_mthd(struct nvkm_client *client, |
| 166 | struct nvkm_object *object, void *data, u32 size) | ||
| 164 | { | 167 | { |
| 165 | union { | 168 | union { |
| 166 | struct nvif_ioctl_mthd_v0 v0; | 169 | struct nvif_ioctl_mthd_v0 v0; |
| @@ -179,7 +182,8 @@ nvkm_ioctl_mthd(struct nvkm_object *object, void *data, u32 size) | |||
| 179 | 182 | ||
| 180 | 183 | ||
| 181 | static int | 184 | static int |
| 182 | nvkm_ioctl_rd(struct nvkm_object *object, void *data, u32 size) | 185 | nvkm_ioctl_rd(struct nvkm_client *client, |
| 186 | struct nvkm_object *object, void *data, u32 size) | ||
| 183 | { | 187 | { |
| 184 | union { | 188 | union { |
| 185 | struct nvif_ioctl_rd_v0 v0; | 189 | struct nvif_ioctl_rd_v0 v0; |
| @@ -218,7 +222,8 @@ nvkm_ioctl_rd(struct nvkm_object *object, void *data, u32 size) | |||
| 218 | } | 222 | } |
| 219 | 223 | ||
| 220 | static int | 224 | static int |
| 221 | nvkm_ioctl_wr(struct nvkm_object *object, void *data, u32 size) | 225 | nvkm_ioctl_wr(struct nvkm_client *client, |
| 226 | struct nvkm_object *object, void *data, u32 size) | ||
| 222 | { | 227 | { |
| 223 | union { | 228 | union { |
| 224 | struct nvif_ioctl_wr_v0 v0; | 229 | struct nvif_ioctl_wr_v0 v0; |
| @@ -246,7 +251,8 @@ nvkm_ioctl_wr(struct nvkm_object *object, void *data, u32 size) | |||
| 246 | } | 251 | } |
| 247 | 252 | ||
| 248 | static int | 253 | static int |
| 249 | nvkm_ioctl_map(struct nvkm_object *object, void *data, u32 size) | 254 | nvkm_ioctl_map(struct nvkm_client *client, |
| 255 | struct nvkm_object *object, void *data, u32 size) | ||
| 250 | { | 256 | { |
| 251 | union { | 257 | union { |
| 252 | struct nvif_ioctl_map_v0 v0; | 258 | struct nvif_ioctl_map_v0 v0; |
| @@ -264,7 +270,8 @@ nvkm_ioctl_map(struct nvkm_object *object, void *data, u32 size) | |||
| 264 | } | 270 | } |
| 265 | 271 | ||
| 266 | static int | 272 | static int |
| 267 | nvkm_ioctl_unmap(struct nvkm_object *object, void *data, u32 size) | 273 | nvkm_ioctl_unmap(struct nvkm_client *client, |
| 274 | struct nvkm_object *object, void *data, u32 size) | ||
| 268 | { | 275 | { |
| 269 | union { | 276 | union { |
| 270 | struct nvif_ioctl_unmap none; | 277 | struct nvif_ioctl_unmap none; |
| @@ -280,7 +287,8 @@ nvkm_ioctl_unmap(struct nvkm_object *object, void *data, u32 size) | |||
| 280 | } | 287 | } |
| 281 | 288 | ||
| 282 | static int | 289 | static int |
| 283 | nvkm_ioctl_ntfy_new(struct nvkm_object *object, void *data, u32 size) | 290 | nvkm_ioctl_ntfy_new(struct nvkm_client *client, |
| 291 | struct nvkm_object *object, void *data, u32 size) | ||
| 284 | { | 292 | { |
| 285 | union { | 293 | union { |
| 286 | struct nvif_ioctl_ntfy_new_v0 v0; | 294 | struct nvif_ioctl_ntfy_new_v0 v0; |
| @@ -306,9 +314,9 @@ nvkm_ioctl_ntfy_new(struct nvkm_object *object, void *data, u32 size) | |||
| 306 | } | 314 | } |
| 307 | 315 | ||
| 308 | static int | 316 | static int |
| 309 | nvkm_ioctl_ntfy_del(struct nvkm_object *object, void *data, u32 size) | 317 | nvkm_ioctl_ntfy_del(struct nvkm_client *client, |
| 318 | struct nvkm_object *object, void *data, u32 size) | ||
| 310 | { | 319 | { |
| 311 | struct nvkm_client *client = object->client; | ||
| 312 | union { | 320 | union { |
| 313 | struct nvif_ioctl_ntfy_del_v0 v0; | 321 | struct nvif_ioctl_ntfy_del_v0 v0; |
| 314 | } *args = data; | 322 | } *args = data; |
| @@ -325,9 +333,9 @@ nvkm_ioctl_ntfy_del(struct nvkm_object *object, void *data, u32 size) | |||
| 325 | } | 333 | } |
| 326 | 334 | ||
| 327 | static int | 335 | static int |
| 328 | nvkm_ioctl_ntfy_get(struct nvkm_object *object, void *data, u32 size) | 336 | nvkm_ioctl_ntfy_get(struct nvkm_client *client, |
| 337 | struct nvkm_object *object, void *data, u32 size) | ||
| 329 | { | 338 | { |
| 330 | struct nvkm_client *client = object->client; | ||
| 331 | union { | 339 | union { |
| 332 | struct nvif_ioctl_ntfy_get_v0 v0; | 340 | struct nvif_ioctl_ntfy_get_v0 v0; |
| 333 | } *args = data; | 341 | } *args = data; |
| @@ -344,9 +352,9 @@ nvkm_ioctl_ntfy_get(struct nvkm_object *object, void *data, u32 size) | |||
| 344 | } | 352 | } |
| 345 | 353 | ||
| 346 | static int | 354 | static int |
| 347 | nvkm_ioctl_ntfy_put(struct nvkm_object *object, void *data, u32 size) | 355 | nvkm_ioctl_ntfy_put(struct nvkm_client *client, |
| 356 | struct nvkm_object *object, void *data, u32 size) | ||
| 348 | { | 357 | { |
| 349 | struct nvkm_client *client = object->client; | ||
| 350 | union { | 358 | union { |
| 351 | struct nvif_ioctl_ntfy_put_v0 v0; | 359 | struct nvif_ioctl_ntfy_put_v0 v0; |
| 352 | } *args = data; | 360 | } *args = data; |
| @@ -364,7 +372,7 @@ nvkm_ioctl_ntfy_put(struct nvkm_object *object, void *data, u32 size) | |||
| 364 | 372 | ||
| 365 | static struct { | 373 | static struct { |
| 366 | int version; | 374 | int version; |
| 367 | int (*func)(struct nvkm_object *, void *, u32); | 375 | int (*func)(struct nvkm_client *, struct nvkm_object *, void *, u32); |
| 368 | } | 376 | } |
| 369 | nvkm_ioctl_v0[] = { | 377 | nvkm_ioctl_v0[] = { |
| 370 | { 0x00, nvkm_ioctl_nop }, | 378 | { 0x00, nvkm_ioctl_nop }, |
| @@ -389,13 +397,10 @@ nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type, | |||
| 389 | struct nvkm_object *object; | 397 | struct nvkm_object *object; |
| 390 | int ret; | 398 | int ret; |
| 391 | 399 | ||
| 392 | if (handle) | 400 | object = nvkm_object_search(client, handle, NULL); |
| 393 | object = nvkm_client_search(client, handle); | 401 | if (IS_ERR(object)) { |
| 394 | else | ||
| 395 | object = &client->object; | ||
| 396 | if (unlikely(!object)) { | ||
| 397 | nvif_ioctl(&client->object, "object not found\n"); | 402 | nvif_ioctl(&client->object, "object not found\n"); |
| 398 | return -ENOENT; | 403 | return PTR_ERR(object); |
| 399 | } | 404 | } |
| 400 | 405 | ||
| 401 | if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != object->route) { | 406 | if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != object->route) { |
| @@ -407,7 +412,7 @@ nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type, | |||
| 407 | 412 | ||
| 408 | if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) { | 413 | if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) { |
| 409 | if (nvkm_ioctl_v0[type].version == 0) | 414 | if (nvkm_ioctl_v0[type].version == 0) |
| 410 | ret = nvkm_ioctl_v0[type].func(object, data, size); | 415 | ret = nvkm_ioctl_v0[type].func(client, object, data, size); |
| 411 | } | 416 | } |
| 412 | 417 | ||
| 413 | return ret; | 418 | return ret; |
| @@ -436,12 +441,13 @@ nvkm_ioctl(struct nvkm_client *client, bool supervisor, | |||
| 436 | &args->v0.route, &args->v0.token); | 441 | &args->v0.route, &args->v0.token); |
| 437 | } | 442 | } |
| 438 | 443 | ||
| 439 | nvif_ioctl(object, "return %d\n", ret); | 444 | if (ret != 1) { |
| 440 | if (hack) { | 445 | nvif_ioctl(object, "return %d\n", ret); |
| 441 | *hack = client->data; | 446 | if (hack) { |
| 442 | client->data = NULL; | 447 | *hack = client->data; |
| 448 | client->data = NULL; | ||
| 449 | } | ||
| 443 | } | 450 | } |
| 444 | 451 | ||
| 445 | client->super = false; | ||
| 446 | return ret; | 452 | return ret; |
| 447 | } | 453 | } |
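Two details in the ioctl rework above are easy to miss. First, every handler now receives the client explicitly instead of digging it out of object->client. Second, nvkm_ioctl_del() returns 1 rather than 0 on success: now that a delete can destroy the client itself, the dispatcher's new "ret != 1" check skips the debug print and the handle hack when the target may no longer exist. A sketch of that sentinel convention (stand-in code, not the nvkm API):

    #include <stdio.h>
    #include <stdlib.h>

    struct object { int alive; };

    static int op_del(struct object **pobj)
    {
        free(*pobj);
        *pobj = NULL;
        return 1;       /* success, but the object no longer exists */
    }

    static int dispatch(struct object **pobj)
    {
        int ret = op_del(pobj);

        if (ret != 1) {
            /* only safe to log/inspect when the object survived */
            printf("object still alive: %d\n", (*pobj)->alive);
        }
        return ret == 1 ? 0 : ret;  /* translate the sentinel for callers */
    }

    int main(void)
    {
        struct object *obj = calloc(1, sizeof(*obj));

        if (!obj)
            return 1;
        return dispatch(&obj);
    }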
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/mm.c b/drivers/gpu/drm/nouveau/nvkm/core/mm.c index 09a1eee8fd33..fd19d652a7ab 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/mm.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/mm.c | |||
| @@ -147,6 +147,7 @@ nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min, | |||
| 147 | if (!this) | 147 | if (!this) |
| 148 | return -ENOMEM; | 148 | return -ENOMEM; |
| 149 | 149 | ||
| 150 | this->next = NULL; | ||
| 150 | this->type = type; | 151 | this->type = type; |
| 151 | list_del(&this->fl_entry); | 152 | list_del(&this->fl_entry); |
| 152 | *pnode = this; | 153 | *pnode = this; |
| @@ -225,6 +226,7 @@ nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min, | |||
| 225 | if (!this) | 226 | if (!this) |
| 226 | return -ENOMEM; | 227 | return -ENOMEM; |
| 227 | 228 | ||
| 229 | this->next = NULL; | ||
| 228 | this->type = type; | 230 | this->type = type; |
| 229 | list_del(&this->fl_entry); | 231 | list_del(&this->fl_entry); |
| 230 | *pnode = this; | 232 | *pnode = this; |
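nvkm_mm nodes are recycled through a free list, so a node handed out by nvkm_mm_head()/nvkm_mm_tail() may still carry a next link from a previous life; the two added lines clear it before the node is returned. The failure mode in miniature, with a stand-in allocator:

    #include <stdio.h>

    struct node { struct node *next; int type; };

    static struct node *freelist;

    static struct node *node_alloc(int type)
    {
        struct node *this = freelist;

        if (!this)
            return NULL;
        freelist = this->next;
        this->next = NULL;  /* the equivalent of the added line */
        this->type = type;
        return this;
    }

    int main(void)
    {
        struct node a = { NULL, 0 }, b = { &a, 0 };
        struct node *got;

        freelist = &b;      /* b still links to a while on the free list */
        got = node_alloc(1);
        printf("allocated node next is %s\n",
               got && got->next ? "stale!" : "NULL");
        return 0;
    }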
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c index 67aa7223dcd7..89d2e9da11c7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/object.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c | |||
| @@ -25,6 +25,65 @@ | |||
| 25 | #include <core/client.h> | 25 | #include <core/client.h> |
| 26 | #include <core/engine.h> | 26 | #include <core/engine.h> |
| 27 | 27 | ||
| 28 | struct nvkm_object * | ||
| 29 | nvkm_object_search(struct nvkm_client *client, u64 handle, | ||
| 30 | const struct nvkm_object_func *func) | ||
| 31 | { | ||
| 32 | struct nvkm_object *object; | ||
| 33 | |||
| 34 | if (handle) { | ||
| 35 | struct rb_node *node = client->objroot.rb_node; | ||
| 36 | while (node) { | ||
| 37 | object = rb_entry(node, typeof(*object), node); | ||
| 38 | if (handle < object->object) | ||
| 39 | node = node->rb_left; | ||
| 40 | else | ||
| 41 | if (handle > object->object) | ||
| 42 | node = node->rb_right; | ||
| 43 | else | ||
| 44 | goto done; | ||
| 45 | } | ||
| 46 | return ERR_PTR(-ENOENT); | ||
| 47 | } else { | ||
| 48 | object = &client->object; | ||
| 49 | } | ||
| 50 | |||
| 51 | done: | ||
| 52 | if (unlikely(func && object->func != func)) | ||
| 53 | return ERR_PTR(-EINVAL); | ||
| 54 | return object; | ||
| 55 | } | ||
| 56 | |||
| 57 | void | ||
| 58 | nvkm_object_remove(struct nvkm_object *object) | ||
| 59 | { | ||
| 60 | if (!RB_EMPTY_NODE(&object->node)) | ||
| 61 | rb_erase(&object->node, &object->client->objroot); | ||
| 62 | } | ||
| 63 | |||
| 64 | bool | ||
| 65 | nvkm_object_insert(struct nvkm_object *object) | ||
| 66 | { | ||
| 67 | struct rb_node **ptr = &object->client->objroot.rb_node; | ||
| 68 | struct rb_node *parent = NULL; | ||
| 69 | |||
| 70 | while (*ptr) { | ||
| 71 | struct nvkm_object *this = rb_entry(*ptr, typeof(*this), node); | ||
| 72 | parent = *ptr; | ||
| 73 | if (object->object < this->object) | ||
| 74 | ptr = &parent->rb_left; | ||
| 75 | else | ||
| 76 | if (object->object > this->object) | ||
| 77 | ptr = &parent->rb_right; | ||
| 78 | else | ||
| 79 | return false; | ||
| 80 | } | ||
| 81 | |||
| 82 | rb_link_node(&object->node, parent, ptr); | ||
| 83 | rb_insert_color(&object->node, &object->client->objroot); | ||
| 84 | return true; | ||
| 85 | } | ||
| 86 | |||
| 28 | int | 87 | int |
| 29 | nvkm_object_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size) | 88 | nvkm_object_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size) |
| 30 | { | 89 | { |
| @@ -214,7 +273,7 @@ nvkm_object_del(struct nvkm_object **pobject) | |||
| 214 | struct nvkm_object *object = *pobject; | 273 | struct nvkm_object *object = *pobject; |
| 215 | if (object && !WARN_ON(!object->func)) { | 274 | if (object && !WARN_ON(!object->func)) { |
| 216 | *pobject = nvkm_object_dtor(object); | 275 | *pobject = nvkm_object_dtor(object); |
| 217 | nvkm_client_remove(object->client, object); | 276 | nvkm_object_remove(object); |
| 218 | list_del(&object->head); | 277 | list_del(&object->head); |
| 219 | kfree(*pobject); | 278 | kfree(*pobject); |
| 220 | *pobject = NULL; | 279 | *pobject = NULL; |
| @@ -230,6 +289,9 @@ nvkm_object_ctor(const struct nvkm_object_func *func, | |||
| 230 | object->engine = nvkm_engine_ref(oclass->engine); | 289 | object->engine = nvkm_engine_ref(oclass->engine); |
| 231 | object->oclass = oclass->base.oclass; | 290 | object->oclass = oclass->base.oclass; |
| 232 | object->handle = oclass->handle; | 291 | object->handle = oclass->handle; |
| 292 | object->route = oclass->route; | ||
| 293 | object->token = oclass->token; | ||
| 294 | object->object = oclass->object; | ||
| 233 | INIT_LIST_HEAD(&object->head); | 295 | INIT_LIST_HEAD(&object->head); |
| 234 | INIT_LIST_HEAD(&object->tree); | 296 | INIT_LIST_HEAD(&object->tree); |
| 235 | RB_CLEAR_NODE(&object->node); | 297 | RB_CLEAR_NODE(&object->node); |
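Per-client object lookup moves from client.c into object.c: every object is keyed by its 64-bit handle in the client's red-black tree, handle 0 short-circuits to the client object itself, and nvkm_object_search() can optionally type-check the result against an expected function table. The same ordered walk, shown over a plain (unbalanced) binary search tree so it compiles standalone — the kernel's rb_* helpers add the rebalancing:

    #include <stdio.h>
    #include <stdint.h>

    struct object {
        uint64_t handle;
        struct object *left, *right;
    };

    static int insert(struct object **root, struct object *obj)
    {
        while (*root) {
            if (obj->handle < (*root)->handle)
                root = &(*root)->left;
            else if (obj->handle > (*root)->handle)
                root = &(*root)->right;
            else
                return 0;   /* duplicate handle: reject */
        }
        *root = obj;
        return 1;
    }

    static struct object *search(struct object *root, uint64_t handle)
    {
        while (root) {
            if (handle < root->handle)
                root = root->left;
            else if (handle > root->handle)
                root = root->right;
            else
                return root;
        }
        return NULL;        /* the kernel code returns ERR_PTR(-ENOENT) */
    }

    int main(void)
    {
        struct object a = { 10 }, b = { 20 }, *root = NULL;

        insert(&root, &a);
        insert(&root, &b);
        printf("found handle 20: %s\n", search(root, 20) ? "yes" : "no");
        return 0;
    }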
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index cceda959b47c..273562dd6bbd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | |||
| @@ -993,7 +993,7 @@ nv92_chipset = { | |||
| 993 | .mc = g84_mc_new, | 993 | .mc = g84_mc_new, |
| 994 | .mmu = nv50_mmu_new, | 994 | .mmu = nv50_mmu_new, |
| 995 | .mxm = nv50_mxm_new, | 995 | .mxm = nv50_mxm_new, |
| 996 | .pci = g84_pci_new, | 996 | .pci = g92_pci_new, |
| 997 | .therm = g84_therm_new, | 997 | .therm = g84_therm_new, |
| 998 | .timer = nv41_timer_new, | 998 | .timer = nv41_timer_new, |
| 999 | .volt = nv40_volt_new, | 999 | .volt = nv40_volt_new, |
| @@ -2138,6 +2138,7 @@ nv12b_chipset = { | |||
| 2138 | .ltc = gm200_ltc_new, | 2138 | .ltc = gm200_ltc_new, |
| 2139 | .mc = gk20a_mc_new, | 2139 | .mc = gk20a_mc_new, |
| 2140 | .mmu = gf100_mmu_new, | 2140 | .mmu = gf100_mmu_new, |
| 2141 | .pmu = gm20b_pmu_new, | ||
| 2141 | .secboot = gm20b_secboot_new, | 2142 | .secboot = gm20b_secboot_new, |
| 2142 | .timer = gk20a_timer_new, | 2143 | .timer = gk20a_timer_new, |
| 2143 | .top = gk104_top_new, | 2144 | .top = gk104_top_new, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c index 0a1381a84552..070ec5e18fdb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c | |||
| @@ -137,7 +137,6 @@ nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func, | |||
| 137 | const struct nvkm_oclass *oclass, | 137 | const struct nvkm_oclass *oclass, |
| 138 | struct nvkm_object **pobject) | 138 | struct nvkm_object **pobject) |
| 139 | { | 139 | { |
| 140 | struct nvkm_device *device = root->disp->base.engine.subdev.device; | ||
| 141 | struct nvkm_client *client = oclass->client; | 140 | struct nvkm_client *client = oclass->client; |
| 142 | struct nvkm_dmaobj *dmaobj; | 141 | struct nvkm_dmaobj *dmaobj; |
| 143 | struct nv50_disp_dmac *chan; | 142 | struct nv50_disp_dmac *chan; |
| @@ -153,9 +152,9 @@ nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func, | |||
| 153 | if (ret) | 152 | if (ret) |
| 154 | return ret; | 153 | return ret; |
| 155 | 154 | ||
| 156 | dmaobj = nvkm_dma_search(device->dma, client, push); | 155 | dmaobj = nvkm_dmaobj_search(client, push); |
| 157 | if (!dmaobj) | 156 | if (IS_ERR(dmaobj)) |
| 158 | return -ENOENT; | 157 | return PTR_ERR(dmaobj); |
| 159 | 158 | ||
| 160 | if (dmaobj->limit - dmaobj->start != 0xfff) | 159 | if (dmaobj->limit - dmaobj->start != 0xfff) |
| 161 | return -EINVAL; | 160 | return -EINVAL; |
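nvkm_dma_search() used to return NULL on any failure; its replacement, nvkm_dmaobj_search() (added in dma/user.c below), reports errors as ERR_PTR values, which is why the caller above switches from a NULL check to IS_ERR()/PTR_ERR(). A userspace re-implementation of that encoding for illustration — the kernel provides the real helpers in <linux/err.h>:

    #include <stdio.h>
    #include <errno.h>
    #include <stdint.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long err) { return (void *)(intptr_t)err; }
    static long PTR_ERR(const void *p) { return (long)(intptr_t)p; }
    static int IS_ERR(const void *p)
    {
        /* the top page of the address space encodes small negative errnos */
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    static int the_object;      /* stands in for a real dmaobj */

    static void *dmaobj_search(uint64_t handle)
    {
        if (handle != 42)
            return ERR_PTR(-ENOENT);    /* was: return NULL */
        return &the_object;
    }

    int main(void)
    {
        void *obj = dmaobj_search(7);

        if (IS_ERR(obj))
            printf("lookup failed: %ld\n", PTR_ERR(obj));
        return 0;
    }

One pointer-sized return slot now carries either a valid object or a specific errno, instead of collapsing every failure into NULL.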
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c index 6f0436df0219..f8f2f16c22a2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c | |||
| @@ -59,7 +59,7 @@ gt215_hda_eld(NV50_DISP_MTHD_V1) | |||
| 59 | ); | 59 | ); |
| 60 | } | 60 | } |
| 61 | for (i = 0; i < size; i++) | 61 | for (i = 0; i < size; i++) |
| 62 | nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]); | 62 | nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]); |
| 63 | for (; i < 0x60; i++) | 63 | for (; i < 0x60; i++) |
| 64 | nvkm_wr32(device, 0x61c440 + soff, (i << 8)); | 64 | nvkm_wr32(device, 0x61c440 + soff, (i << 8)); |
| 65 | nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003); | 65 | nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003); |
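The one-character hda fix above repairs a classic copy-loop bug: each register write packs the byte index into bits 15:8 and the payload into bits 7:0, but with args->v0.data[0] every slot of the ELD buffer received the buffer's first byte. In miniature (stand-in code, not the hardware interface):

    #include <stdio.h>

    int main(void)
    {
        const unsigned char data[4] = { 'E', 'L', 'D', 0 };
        unsigned i;

        for (i = 0; i < 4; i++) {
            unsigned w = (i << 8) | data[i];    /* was: data[0] */
            printf("write 0x%04x\n", w);
        }
        return 0;
    }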
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c index 567466f93cd5..0db8efbf1c2e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | |||
| @@ -433,8 +433,6 @@ nv50_disp_dptmds_war(struct nvkm_device *device) | |||
| 433 | case 0x94: | 433 | case 0x94: |
| 434 | case 0x96: | 434 | case 0x96: |
| 435 | case 0x98: | 435 | case 0x98: |
| 436 | case 0xaa: | ||
| 437 | case 0xac: | ||
| 438 | return true; | 436 | return true; |
| 439 | default: | 437 | default: |
| 440 | break; | 438 | break; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c index 4510cb6e10a8..627b9ee1ddd2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c | |||
| @@ -39,13 +39,6 @@ g94_sor_loff(struct nvkm_output_dp *outp) | |||
| 39 | } | 39 | } |
| 40 | 40 | ||
| 41 | /******************************************************************************* | 41 | /******************************************************************************* |
| 42 | * TMDS/LVDS | ||
| 43 | ******************************************************************************/ | ||
| 44 | static const struct nvkm_output_func | ||
| 45 | g94_sor_output_func = { | ||
| 46 | }; | ||
| 47 | |||
| 48 | /******************************************************************************* | ||
| 49 | * DisplayPort | 42 | * DisplayPort |
| 50 | ******************************************************************************/ | 43 | ******************************************************************************/ |
| 51 | u32 | 44 | u32 |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c index f11ebdd16c77..11b7b8fd5dda 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c | |||
| @@ -28,24 +28,6 @@ | |||
| 28 | 28 | ||
| 29 | #include <nvif/class.h> | 29 | #include <nvif/class.h> |
| 30 | 30 | ||
| 31 | struct nvkm_dmaobj * | ||
| 32 | nvkm_dma_search(struct nvkm_dma *dma, struct nvkm_client *client, u64 object) | ||
| 33 | { | ||
| 34 | struct rb_node *node = client->dmaroot.rb_node; | ||
| 35 | while (node) { | ||
| 36 | struct nvkm_dmaobj *dmaobj = | ||
| 37 | container_of(node, typeof(*dmaobj), rb); | ||
| 38 | if (object < dmaobj->handle) | ||
| 39 | node = node->rb_left; | ||
| 40 | else | ||
| 41 | if (object > dmaobj->handle) | ||
| 42 | node = node->rb_right; | ||
| 43 | else | ||
| 44 | return dmaobj; | ||
| 45 | } | ||
| 46 | return NULL; | ||
| 47 | } | ||
| 48 | |||
| 49 | static int | 31 | static int |
| 50 | nvkm_dma_oclass_new(struct nvkm_device *device, | 32 | nvkm_dma_oclass_new(struct nvkm_device *device, |
| 51 | const struct nvkm_oclass *oclass, void *data, u32 size, | 33 | const struct nvkm_oclass *oclass, void *data, u32 size, |
| @@ -53,34 +35,12 @@ nvkm_dma_oclass_new(struct nvkm_device *device, | |||
| 53 | { | 35 | { |
| 54 | struct nvkm_dma *dma = nvkm_dma(oclass->engine); | 36 | struct nvkm_dma *dma = nvkm_dma(oclass->engine); |
| 55 | struct nvkm_dmaobj *dmaobj = NULL; | 37 | struct nvkm_dmaobj *dmaobj = NULL; |
| 56 | struct nvkm_client *client = oclass->client; | ||
| 57 | struct rb_node **ptr = &client->dmaroot.rb_node; | ||
| 58 | struct rb_node *parent = NULL; | ||
| 59 | int ret; | 38 | int ret; |
| 60 | 39 | ||
| 61 | ret = dma->func->class_new(dma, oclass, data, size, &dmaobj); | 40 | ret = dma->func->class_new(dma, oclass, data, size, &dmaobj); |
| 62 | if (dmaobj) | 41 | if (dmaobj) |
| 63 | *pobject = &dmaobj->object; | 42 | *pobject = &dmaobj->object; |
| 64 | if (ret) | 43 | return ret; |
| 65 | return ret; | ||
| 66 | |||
| 67 | dmaobj->handle = oclass->object; | ||
| 68 | |||
| 69 | while (*ptr) { | ||
| 70 | struct nvkm_dmaobj *obj = container_of(*ptr, typeof(*obj), rb); | ||
| 71 | parent = *ptr; | ||
| 72 | if (dmaobj->handle < obj->handle) | ||
| 73 | ptr = &parent->rb_left; | ||
| 74 | else | ||
| 75 | if (dmaobj->handle > obj->handle) | ||
| 76 | ptr = &parent->rb_right; | ||
| 77 | else | ||
| 78 | return -EEXIST; | ||
| 79 | } | ||
| 80 | |||
| 81 | rb_link_node(&dmaobj->rb, parent, ptr); | ||
| 82 | rb_insert_color(&dmaobj->rb, &client->dmaroot); | ||
| 83 | return 0; | ||
| 84 | } | 44 | } |
| 85 | 45 | ||
| 86 | static const struct nvkm_device_oclass | 46 | static const struct nvkm_device_oclass |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c index 13c661b1ef14..d20cc0681a88 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c | |||
| @@ -31,6 +31,19 @@ | |||
| 31 | #include <nvif/cl0002.h> | 31 | #include <nvif/cl0002.h> |
| 32 | #include <nvif/unpack.h> | 32 | #include <nvif/unpack.h> |
| 33 | 33 | ||
| 34 | static const struct nvkm_object_func nvkm_dmaobj_func; | ||
| 35 | struct nvkm_dmaobj * | ||
| 36 | nvkm_dmaobj_search(struct nvkm_client *client, u64 handle) | ||
| 37 | { | ||
| 38 | struct nvkm_object *object; | ||
| 39 | |||
| 40 | object = nvkm_object_search(client, handle, &nvkm_dmaobj_func); | ||
| 41 | if (IS_ERR(object)) | ||
| 42 | return (void *)object; | ||
| 43 | |||
| 44 | return nvkm_dmaobj(object); | ||
| 45 | } | ||
| 46 | |||
| 34 | static int | 47 | static int |
| 35 | nvkm_dmaobj_bind(struct nvkm_object *base, struct nvkm_gpuobj *gpuobj, | 48 | nvkm_dmaobj_bind(struct nvkm_object *base, struct nvkm_gpuobj *gpuobj, |
| 36 | int align, struct nvkm_gpuobj **pgpuobj) | 49 | int align, struct nvkm_gpuobj **pgpuobj) |
| @@ -42,10 +55,7 @@ nvkm_dmaobj_bind(struct nvkm_object *base, struct nvkm_gpuobj *gpuobj, | |||
| 42 | static void * | 55 | static void * |
| 43 | nvkm_dmaobj_dtor(struct nvkm_object *base) | 56 | nvkm_dmaobj_dtor(struct nvkm_object *base) |
| 44 | { | 57 | { |
| 45 | struct nvkm_dmaobj *dmaobj = nvkm_dmaobj(base); | 58 | return nvkm_dmaobj(base); |
| 46 | if (!RB_EMPTY_NODE(&dmaobj->rb)) | ||
| 47 | rb_erase(&dmaobj->rb, &dmaobj->object.client->dmaroot); | ||
| 48 | return dmaobj; | ||
| 49 | } | 59 | } |
| 50 | 60 | ||
| 51 | static const struct nvkm_object_func | 61 | static const struct nvkm_object_func |
| @@ -74,7 +84,6 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma, | |||
| 74 | nvkm_object_ctor(&nvkm_dmaobj_func, oclass, &dmaobj->object); | 84 | nvkm_object_ctor(&nvkm_dmaobj_func, oclass, &dmaobj->object); |
| 75 | dmaobj->func = func; | 85 | dmaobj->func = func; |
| 76 | dmaobj->dma = dma; | 86 | dmaobj->dma = dma; |
| 77 | RB_CLEAR_NODE(&dmaobj->rb); | ||
| 78 | 87 | ||
| 79 | nvif_ioctl(parent, "create dma size %d\n", *psize); | 88 | nvif_ioctl(parent, "create dma size %d\n", *psize); |
| 80 | if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) { | 89 | if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) { |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c index 1c9682ae3a6b..660ca7aa95ea 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c | |||
| @@ -32,6 +32,17 @@ | |||
| 32 | #include <nvif/unpack.h> | 32 | #include <nvif/unpack.h> |
| 33 | 33 | ||
| 34 | void | 34 | void |
| 35 | nvkm_fifo_recover_chan(struct nvkm_fifo *fifo, int chid) | ||
| 36 | { | ||
| 37 | unsigned long flags; | ||
| 38 | if (WARN_ON(!fifo->func->recover_chan)) | ||
| 39 | return; | ||
| 40 | spin_lock_irqsave(&fifo->lock, flags); | ||
| 41 | fifo->func->recover_chan(fifo, chid); | ||
| 42 | spin_unlock_irqrestore(&fifo->lock, flags); | ||
| 43 | } | ||
| 44 | |||
| 45 | void | ||
| 35 | nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags) | 46 | nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags) |
| 36 | { | 47 | { |
| 37 | return fifo->func->pause(fifo, flags); | 48 | return fifo->func->pause(fifo, flags); |
| @@ -55,19 +66,29 @@ nvkm_fifo_chan_put(struct nvkm_fifo *fifo, unsigned long flags, | |||
| 55 | } | 66 | } |
| 56 | 67 | ||
| 57 | struct nvkm_fifo_chan * | 68 | struct nvkm_fifo_chan * |
| 58 | nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags) | 69 | nvkm_fifo_chan_inst_locked(struct nvkm_fifo *fifo, u64 inst) |
| 59 | { | 70 | { |
| 60 | struct nvkm_fifo_chan *chan; | 71 | struct nvkm_fifo_chan *chan; |
| 61 | unsigned long flags; | ||
| 62 | spin_lock_irqsave(&fifo->lock, flags); | ||
| 63 | list_for_each_entry(chan, &fifo->chan, head) { | 72 | list_for_each_entry(chan, &fifo->chan, head) { |
| 64 | if (chan->inst->addr == inst) { | 73 | if (chan->inst->addr == inst) { |
| 65 | list_del(&chan->head); | 74 | list_del(&chan->head); |
| 66 | list_add(&chan->head, &fifo->chan); | 75 | list_add(&chan->head, &fifo->chan); |
| 67 | *rflags = flags; | ||
| 68 | return chan; | 76 | return chan; |
| 69 | } | 77 | } |
| 70 | } | 78 | } |
| 79 | return NULL; | ||
| 80 | } | ||
| 81 | |||
| 82 | struct nvkm_fifo_chan * | ||
| 83 | nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags) | ||
| 84 | { | ||
| 85 | struct nvkm_fifo_chan *chan; | ||
| 86 | unsigned long flags; | ||
| 87 | spin_lock_irqsave(&fifo->lock, flags); | ||
| 88 | if ((chan = nvkm_fifo_chan_inst_locked(fifo, inst))) { | ||
| 89 | *rflags = flags; | ||
| 90 | return chan; | ||
| 91 | } | ||
| 71 | spin_unlock_irqrestore(&fifo->lock, flags); | 92 | spin_unlock_irqrestore(&fifo->lock, flags); |
| 72 | return NULL; | 93 | return NULL; |
| 73 | } | 94 | } |
| @@ -90,9 +111,34 @@ nvkm_fifo_chan_chid(struct nvkm_fifo *fifo, int chid, unsigned long *rflags) | |||
| 90 | return NULL; | 111 | return NULL; |
| 91 | } | 112 | } |
| 92 | 113 | ||
| 114 | void | ||
| 115 | nvkm_fifo_kevent(struct nvkm_fifo *fifo, int chid) | ||
| 116 | { | ||
| 117 | nvkm_event_send(&fifo->kevent, 1, chid, NULL, 0); | ||
| 118 | } | ||
| 119 | |||
| 93 | static int | 120 | static int |
| 94 | nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size, | 121 | nvkm_fifo_kevent_ctor(struct nvkm_object *object, void *data, u32 size, |
| 95 | struct nvkm_notify *notify) | 122 | struct nvkm_notify *notify) |
| 123 | { | ||
| 124 | struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object); | ||
| 125 | if (size == 0) { | ||
| 126 | notify->size = 0; | ||
| 127 | notify->types = 1; | ||
| 128 | notify->index = chan->chid; | ||
| 129 | return 0; | ||
| 130 | } | ||
| 131 | return -ENOSYS; | ||
| 132 | } | ||
| 133 | |||
| 134 | static const struct nvkm_event_func | ||
| 135 | nvkm_fifo_kevent_func = { | ||
| 136 | .ctor = nvkm_fifo_kevent_ctor, | ||
| 137 | }; | ||
| 138 | |||
| 139 | static int | ||
| 140 | nvkm_fifo_cevent_ctor(struct nvkm_object *object, void *data, u32 size, | ||
| 141 | struct nvkm_notify *notify) | ||
| 96 | { | 142 | { |
| 97 | if (size == 0) { | 143 | if (size == 0) { |
| 98 | notify->size = 0; | 144 | notify->size = 0; |
| @@ -104,10 +150,16 @@ nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size, | |||
| 104 | } | 150 | } |
| 105 | 151 | ||
| 106 | static const struct nvkm_event_func | 152 | static const struct nvkm_event_func |
| 107 | nvkm_fifo_event_func = { | 153 | nvkm_fifo_cevent_func = { |
| 108 | .ctor = nvkm_fifo_event_ctor, | 154 | .ctor = nvkm_fifo_cevent_ctor, |
| 109 | }; | 155 | }; |
| 110 | 156 | ||
| 157 | void | ||
| 158 | nvkm_fifo_cevent(struct nvkm_fifo *fifo) | ||
| 159 | { | ||
| 160 | nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0); | ||
| 161 | } | ||
| 162 | |||
| 111 | static void | 163 | static void |
| 112 | nvkm_fifo_uevent_fini(struct nvkm_event *event, int type, int index) | 164 | nvkm_fifo_uevent_fini(struct nvkm_event *event, int type, int index) |
| 113 | { | 165 | { |
| @@ -241,6 +293,7 @@ nvkm_fifo_dtor(struct nvkm_engine *engine) | |||
| 241 | void *data = fifo; | 293 | void *data = fifo; |
| 242 | if (fifo->func->dtor) | 294 | if (fifo->func->dtor) |
| 243 | data = fifo->func->dtor(fifo); | 295 | data = fifo->func->dtor(fifo); |
| 296 | nvkm_event_fini(&fifo->kevent); | ||
| 244 | nvkm_event_fini(&fifo->cevent); | 297 | nvkm_event_fini(&fifo->cevent); |
| 245 | nvkm_event_fini(&fifo->uevent); | 298 | nvkm_event_fini(&fifo->uevent); |
| 246 | return data; | 299 | return data; |
| @@ -283,5 +336,9 @@ nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device, | |||
| 283 | return ret; | 336 | return ret; |
| 284 | } | 337 | } |
| 285 | 338 | ||
| 286 | return nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &fifo->cevent); | 339 | ret = nvkm_event_init(&nvkm_fifo_cevent_func, 1, 1, &fifo->cevent); |
| 340 | if (ret) | ||
| 341 | return ret; | ||
| 342 | |||
| 343 | return nvkm_event_init(&nvkm_fifo_kevent_func, 1, nr, &fifo->kevent); | ||
| 287 | } | 344 | } |
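The nvkm_fifo_chan_inst() split above is a common kernel pattern: the _locked variant assumes the caller already holds fifo->lock, so a path like the gk104 fault handler can do the lookup and the follow-up work inside one critical section. A minimal sketch of the pattern, assuming hypothetical item/bucket types in place of the nvkm structures:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct item {
            u64 key;
            struct list_head head;
    };

    struct bucket {
            spinlock_t lock;
            struct list_head items;
    };

    /* Caller must already hold b->lock. */
    static struct item *find_locked(struct bucket *b, u64 key)
    {
            struct item *it;
            list_for_each_entry(it, &b->items, head) {
                    if (it->key == key)
                            return it;
            }
            return NULL;
    }

    /* Takes the lock itself; on a hit the lock is deliberately left
     * held, and the caller must release it with the saved flags. */
    static struct item *find(struct bucket *b, u64 key, unsigned long *rflags)
    {
            struct item *it;
            unsigned long flags;

            spin_lock_irqsave(&b->lock, flags);
            it = find_locked(b, key);
            if (it) {
                    *rflags = flags;
                    return it;
            }
            spin_unlock_irqrestore(&b->lock, flags);
            return NULL;
    }

Returning with the lock held mirrors what nvkm_fifo_chan_inst() does here; the fifo caller later releases it via nvkm_fifo_chan_put() with the saved flags.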
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c index dc6d4678f228..fab760ae922f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c | |||
| @@ -371,9 +371,9 @@ nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func, | |||
| 371 | 371 | ||
| 372 | /* allocate push buffer ctxdma instance */ | 372 | /* allocate push buffer ctxdma instance */ |
| 373 | if (push) { | 373 | if (push) { |
| 374 | dmaobj = nvkm_dma_search(device->dma, oclass->client, push); | 374 | dmaobj = nvkm_dmaobj_search(client, push); |
| 375 | if (!dmaobj) | 375 | if (IS_ERR(dmaobj)) |
| 376 | return -ENOENT; | 376 | return PTR_ERR(dmaobj); |
| 377 | 377 | ||
| 378 | ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16, | 378 | ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16, |
| 379 | &chan->push); | 379 | &chan->push); |
| @@ -410,6 +410,6 @@ nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func, | |||
| 410 | base + user * chan->chid; | 410 | base + user * chan->chid; |
| 411 | chan->size = user; | 411 | chan->size = user; |
| 412 | 412 | ||
| 413 | nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0); | 413 | nvkm_fifo_cevent(fifo); |
| 414 | return 0; | 414 | return 0; |
| 415 | } | 415 | } |
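The push-buffer lookup now goes through nvkm_dmaobj_search(), which encodes the failure reason as an ERR_PTR() errno instead of returning a bare NULL, so the caller can propagate it with PTR_ERR(). A short sketch of the convention, with hypothetical table_lookup() and thing_usable() helpers:

    #include <linux/err.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    struct thing;

    /* Hypothetical helpers standing in for the real lookup logic. */
    struct thing *table_lookup(void *table, u64 handle);
    bool thing_usable(struct thing *obj);

    static struct thing *thing_search(void *table, u64 handle)
    {
            struct thing *obj = table_lookup(table, handle);

            if (!obj)
                    return ERR_PTR(-ENOENT); /* no such handle */
            if (!thing_usable(obj))
                    return ERR_PTR(-EINVAL); /* found, but unusable */
            return obj;
    }

The caller-side test then matches the hunk above:

    obj = thing_search(table, handle);
    if (IS_ERR(obj))
            return PTR_ERR(obj);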
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h index 55dc415c5c08..d8019bdacd61 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h | |||
| @@ -29,5 +29,5 @@ struct nvkm_fifo_chan_oclass { | |||
| 29 | struct nvkm_sclass base; | 29 | struct nvkm_sclass base; |
| 30 | }; | 30 | }; |
| 31 | 31 | ||
| 32 | int g84_fifo_chan_ntfy(struct nvkm_fifo_chan *, u32, struct nvkm_event **); | 32 | int gf100_fifo_chan_ntfy(struct nvkm_fifo_chan *, u32, struct nvkm_event **); |
| 33 | #endif | 33 | #endif |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c index 15a992b3580a..61797c4dd07a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c | |||
| @@ -30,12 +30,12 @@ | |||
| 30 | 30 | ||
| 31 | #include <nvif/cl826e.h> | 31 | #include <nvif/cl826e.h> |
| 32 | 32 | ||
| 33 | int | 33 | static int |
| 34 | g84_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type, | 34 | g84_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type, |
| 35 | struct nvkm_event **pevent) | 35 | struct nvkm_event **pevent) |
| 36 | { | 36 | { |
| 37 | switch (type) { | 37 | switch (type) { |
| 38 | case G82_CHANNEL_DMA_V0_NTFY_UEVENT: | 38 | case NV826E_V0_NTFY_NON_STALL_INTERRUPT: |
| 39 | *pevent = &chan->fifo->uevent; | 39 | *pevent = &chan->fifo->uevent; |
| 40 | return 0; | 40 | return 0; |
| 41 | default: | 41 | default: |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c index ec68ea9747d5..cd468ab1db12 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c | |||
| @@ -68,7 +68,14 @@ gf100_fifo_runlist_commit(struct gf100_fifo *fifo) | |||
| 68 | } | 68 | } |
| 69 | nvkm_done(cur); | 69 | nvkm_done(cur); |
| 70 | 70 | ||
| 71 | target = (nvkm_memory_target(cur) == NVKM_MEM_TARGET_HOST) ? 0x3 : 0x0; | 71 | switch (nvkm_memory_target(cur)) { |
| 72 | case NVKM_MEM_TARGET_VRAM: target = 0; break; | ||
| 73 | case NVKM_MEM_TARGET_NCOH: target = 3; break; | ||
| 74 | default: | ||
| 75 | mutex_unlock(&subdev->mutex); | ||
| 76 | WARN_ON(1); | ||
| 77 | return; | ||
| 78 | } | ||
| 72 | 79 | ||
| 73 | nvkm_wr32(device, 0x002270, (nvkm_memory_addr(cur) >> 12) | | 80 | nvkm_wr32(device, 0x002270, (nvkm_memory_addr(cur) >> 12) | |
| 74 | (target << 28)); | 81 | (target << 28)); |
| @@ -183,6 +190,7 @@ gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine, | |||
| 183 | if (engine != &fifo->base.engine) | 190 | if (engine != &fifo->base.engine) |
| 184 | fifo->recover.mask |= 1ULL << engine->subdev.index; | 191 | fifo->recover.mask |= 1ULL << engine->subdev.index; |
| 185 | schedule_work(&fifo->recover.work); | 192 | schedule_work(&fifo->recover.work); |
| 193 | nvkm_fifo_kevent(&fifo->base, chid); | ||
| 186 | } | 194 | } |
| 187 | 195 | ||
| 188 | static const struct nvkm_enum | 196 | static const struct nvkm_enum |
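This hunk, like the matching gk104.c one below, replaces the old two-way host/VRAM guess with an explicit switch over the memory target, so an unexpected aperture triggers WARN_ON() instead of being programmed silently; note the gf100 variant also has to drop subdev->mutex before bailing out. A minimal sketch of the mapping, with a hypothetical helper name and enum:

    #include <linux/bug.h>
    #include <linux/errno.h>

    enum mem_target { TARGET_VRAM, TARGET_HOST_COH, TARGET_HOST_NCOH };

    /* Map a memory target to the aperture field written into the
     * runlist base register (the values 0 and 3 are those used in
     * the diff). */
    static int runlist_aperture(enum mem_target target)
    {
            switch (target) {
            case TARGET_VRAM:      return 0;
            case TARGET_HOST_NCOH: return 3;
            default:
                    WARN_ON(1); /* coherent sysmem not expected here */
                    return -EINVAL;
            }
    }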
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c index 38c0910722c0..3a24788c3185 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c | |||
| @@ -27,11 +27,71 @@ | |||
| 27 | #include <core/client.h> | 27 | #include <core/client.h> |
| 28 | #include <core/gpuobj.h> | 28 | #include <core/gpuobj.h> |
| 29 | #include <subdev/bar.h> | 29 | #include <subdev/bar.h> |
| 30 | #include <subdev/timer.h> | ||
| 30 | #include <subdev/top.h> | 31 | #include <subdev/top.h> |
| 31 | #include <engine/sw.h> | 32 | #include <engine/sw.h> |
| 32 | 33 | ||
| 33 | #include <nvif/class.h> | 34 | #include <nvif/class.h> |
| 34 | 35 | ||
| 36 | struct gk104_fifo_engine_status { | ||
| 37 | bool busy; | ||
| 38 | bool faulted; | ||
| 39 | bool chsw; | ||
| 40 | bool save; | ||
| 41 | bool load; | ||
| 42 | struct { | ||
| 43 | bool tsg; | ||
| 44 | u32 id; | ||
| 45 | } prev, next, *chan; | ||
| 46 | }; | ||
| 47 | |||
| 48 | static void | ||
| 49 | gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn, | ||
| 50 | struct gk104_fifo_engine_status *status) | ||
| 51 | { | ||
| 52 | struct nvkm_engine *engine = fifo->engine[engn].engine; | ||
| 53 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; | ||
| 54 | struct nvkm_device *device = subdev->device; | ||
| 55 | u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08)); | ||
| 56 | |||
| 57 | status->busy = !!(stat & 0x80000000); | ||
| 58 | status->faulted = !!(stat & 0x40000000); | ||
| 59 | status->next.tsg = !!(stat & 0x10000000); | ||
| 60 | status->next.id = (stat & 0x0fff0000) >> 16; | ||
| 61 | status->chsw = !!(stat & 0x00008000); | ||
| 62 | status->save = !!(stat & 0x00004000); | ||
| 63 | status->load = !!(stat & 0x00002000); | ||
| 64 | status->prev.tsg = !!(stat & 0x00001000); | ||
| 65 | status->prev.id = (stat & 0x00000fff); | ||
| 66 | status->chan = NULL; | ||
| 67 | |||
| 68 | if (status->busy && status->chsw) { | ||
| 69 | if (status->load && status->save) { | ||
| 70 | if (engine && nvkm_engine_chsw_load(engine)) | ||
| 71 | status->chan = &status->next; | ||
| 72 | else | ||
| 73 | status->chan = &status->prev; | ||
| 74 | } else | ||
| 75 | if (status->load) { | ||
| 76 | status->chan = &status->next; | ||
| 77 | } else { | ||
| 78 | status->chan = &status->prev; | ||
| 79 | } | ||
| 80 | } else | ||
| 81 | if (status->load) { | ||
| 82 | status->chan = &status->prev; | ||
| 83 | } | ||
| 84 | |||
| 85 | nvkm_debug(subdev, "engine %02d: busy %d faulted %d chsw %d " | ||
| 86 | "save %d load %d %sid %d%s-> %sid %d%s\n", | ||
| 87 | engn, status->busy, status->faulted, | ||
| 88 | status->chsw, status->save, status->load, | ||
| 89 | status->prev.tsg ? "tsg" : "ch", status->prev.id, | ||
| 90 | status->chan == &status->prev ? "*" : " ", | ||
| 91 | status->next.tsg ? "tsg" : "ch", status->next.id, | ||
| 92 | status->chan == &status->next ? "*" : " "); | ||
| 93 | } | ||
| 94 | |||
| 35 | static int | 95 | static int |
| 36 | gk104_fifo_class_get(struct nvkm_fifo *base, int index, | 96 | gk104_fifo_class_get(struct nvkm_fifo *base, int index, |
| 37 | const struct nvkm_fifo_chan_oclass **psclass) | 97 | const struct nvkm_fifo_chan_oclass **psclass) |
| @@ -83,10 +143,13 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl) | |||
| 83 | } | 143 | } |
| 84 | nvkm_done(mem); | 144 | nvkm_done(mem); |
| 85 | 145 | ||
| 86 | if (nvkm_memory_target(mem) == NVKM_MEM_TARGET_VRAM) | 146 | switch (nvkm_memory_target(mem)) { |
| 87 | target = 0; | 147 | case NVKM_MEM_TARGET_VRAM: target = 0; break; |
| 88 | else | 148 | case NVKM_MEM_TARGET_NCOH: target = 3; break; |
| 89 | target = 3; | 149 | default: |
| 150 | WARN_ON(1); | ||
| 151 | return; | ||
| 152 | } | ||
| 90 | 153 | ||
| 91 | nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) | | 154 | nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) | |
| 92 | (target << 28)); | 155 | (target << 28)); |
| @@ -149,31 +212,137 @@ gk104_fifo_recover_work(struct work_struct *w) | |||
| 149 | nvkm_mask(device, 0x002630, runm, 0x00000000); | 212 | nvkm_mask(device, 0x002630, runm, 0x00000000); |
| 150 | } | 213 | } |
| 151 | 214 | ||
| 215 | static void gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn); | ||
| 216 | |||
| 152 | static void | 217 | static void |
| 153 | gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine, | 218 | gk104_fifo_recover_runl(struct gk104_fifo *fifo, int runl) |
| 154 | struct gk104_fifo_chan *chan) | ||
| 155 | { | 219 | { |
| 156 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; | 220 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; |
| 157 | struct nvkm_device *device = subdev->device; | 221 | struct nvkm_device *device = subdev->device; |
| 158 | u32 chid = chan->base.chid; | 222 | const u32 runm = BIT(runl); |
| 159 | int engn; | ||
| 160 | 223 | ||
| 161 | nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n", | ||
| 162 | nvkm_subdev_name[engine->subdev.index], chid); | ||
| 163 | assert_spin_locked(&fifo->base.lock); | 224 | assert_spin_locked(&fifo->base.lock); |
| 225 | if (fifo->recover.runm & runm) | ||
| 226 | return; | ||
| 227 | fifo->recover.runm |= runm; | ||
| 164 | 228 | ||
| 165 | nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800); | 229 | /* Block the runlist to prevent channel assignment(s) from changing. */ |
| 166 | list_del_init(&chan->head); | 230 | nvkm_mask(device, 0x002630, runm, runm); |
| 167 | chan->killed = true; | ||
| 168 | 231 | ||
| 169 | for (engn = 0; engn < fifo->engine_nr; engn++) { | 232 | /* Schedule recovery. */ |
| 170 | if (fifo->engine[engn].engine == engine) { | 233 | nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl); |
| 171 | fifo->recover.engm |= BIT(engn); | 234 | schedule_work(&fifo->recover.work); |
| 235 | } | ||
| 236 | |||
| 237 | static void | ||
| 238 | gk104_fifo_recover_chan(struct nvkm_fifo *base, int chid) | ||
| 239 | { | ||
| 240 | struct gk104_fifo *fifo = gk104_fifo(base); | ||
| 241 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; | ||
| 242 | struct nvkm_device *device = subdev->device; | ||
| 243 | const u32 stat = nvkm_rd32(device, 0x800004 + (chid * 0x08)); | ||
| 244 | const u32 runl = (stat & 0x000f0000) >> 16; | ||
| 245 | const bool used = (stat & 0x00000001); | ||
| 246 | unsigned long engn, engm = fifo->runlist[runl].engm; | ||
| 247 | struct gk104_fifo_chan *chan; | ||
| 248 | |||
| 249 | assert_spin_locked(&fifo->base.lock); | ||
| 250 | if (!used) | ||
| 251 | return; | ||
| 252 | |||
| 253 | /* Lookup SW state for channel, and mark it as dead. */ | ||
| 254 | list_for_each_entry(chan, &fifo->runlist[runl].chan, head) { | ||
| 255 | if (chan->base.chid == chid) { | ||
| 256 | list_del_init(&chan->head); | ||
| 257 | chan->killed = true; | ||
| 258 | nvkm_fifo_kevent(&fifo->base, chid); | ||
| 172 | break; | 259 | break; |
| 173 | } | 260 | } |
| 174 | } | 261 | } |
| 175 | 262 | ||
| 176 | fifo->recover.runm |= BIT(chan->runl); | 263 | /* Disable channel. */ |
| 264 | nvkm_wr32(device, 0x800004 + (chid * 0x08), stat | 0x00000800); | ||
| 265 | nvkm_warn(subdev, "channel %d: killed\n", chid); | ||
| 266 | |||
| 267 | /* Prevent channel assignments from changing during recovery. */ | ||
| 268 | gk104_fifo_recover_runl(fifo, runl); | ||
| 269 | |||
| 270 | /* Schedule recovery for any engines the channel is on. */ | ||
| 271 | for_each_set_bit(engn, &engm, fifo->engine_nr) { | ||
| 272 | struct gk104_fifo_engine_status status; | ||
| 273 | gk104_fifo_engine_status(fifo, engn, &status); | ||
| 274 | if (!status.chan || status.chan->id != chid) | ||
| 275 | continue; | ||
| 276 | gk104_fifo_recover_engn(fifo, engn); | ||
| 277 | } | ||
| 278 | } | ||
| 279 | |||
| 280 | static void | ||
| 281 | gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn) | ||
| 282 | { | ||
| 283 | struct nvkm_engine *engine = fifo->engine[engn].engine; | ||
| 284 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; | ||
| 285 | struct nvkm_device *device = subdev->device; | ||
| 286 | const u32 runl = fifo->engine[engn].runl; | ||
| 287 | const u32 engm = BIT(engn); | ||
| 288 | struct gk104_fifo_engine_status status; | ||
| 289 | int mmui = -1; | ||
| 290 | |||
| 291 | assert_spin_locked(&fifo->base.lock); | ||
| 292 | if (fifo->recover.engm & engm) | ||
| 293 | return; | ||
| 294 | fifo->recover.engm |= engm; | ||
| 295 | |||
| 296 | /* Prevent channel assignments from changing during recovery. */ | ||
| 297 | gk104_fifo_recover_runl(fifo, runl); | ||
| 298 | |||
| 299 | /* Determine which channel (if any) is currently on the engine. */ | ||
| 300 | gk104_fifo_engine_status(fifo, engn, &status); | ||
| 301 | if (status.chan) { | ||
| 302 | /* The channel is no longer viable; kill it. */ | ||
| 303 | gk104_fifo_recover_chan(&fifo->base, status.chan->id); | ||
| 304 | } | ||
| 305 | |||
| 306 | /* Determine MMU fault ID for the engine, if we're not being | ||
| 307 | * called from the fault handler already. | ||
| 308 | */ | ||
| 309 | if (!status.faulted && engine) { | ||
| 310 | mmui = nvkm_top_fault_id(device, engine->subdev.index); | ||
| 311 | if (mmui < 0) { | ||
| 312 | const struct nvkm_enum *en = fifo->func->fault.engine; | ||
| 313 | for (; en && en->name; en++) { | ||
| 314 | if (en->data2 == engine->subdev.index) { | ||
| 315 | mmui = en->value; | ||
| 316 | break; | ||
| 317 | } | ||
| 318 | } | ||
| 319 | } | ||
| 320 | WARN_ON(mmui < 0); | ||
| 321 | } | ||
| 322 | |||
| 323 | /* Trigger a MMU fault for the engine. | ||
| 324 | * | ||
| 325 | * It's not clear why this is needed, but nvgpu does something similar, | ||
| 326 | * and it makes recovery from CTXSW_TIMEOUT a lot more reliable. | ||
| 327 | */ | ||
| 328 | if (mmui >= 0) { | ||
| 329 | nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000100 | mmui); | ||
| 330 | |||
| 331 | /* Wait for fault to trigger. */ | ||
| 332 | nvkm_msec(device, 2000, | ||
| 333 | gk104_fifo_engine_status(fifo, engn, &status); | ||
| 334 | if (status.faulted) | ||
| 335 | break; | ||
| 336 | ); | ||
| 337 | |||
| 338 | /* Release MMU fault trigger, and ACK the fault. */ | ||
| 339 | nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000000); | ||
| 340 | nvkm_wr32(device, 0x00259c, BIT(mmui)); | ||
| 341 | nvkm_wr32(device, 0x002100, 0x10000000); | ||
| 342 | } | ||
| 343 | |||
| 344 | /* Schedule recovery. */ | ||
| 345 | nvkm_warn(subdev, "engine %d: scheduled for recovery\n", engn); | ||
| 177 | schedule_work(&fifo->recover.work); | 346 | schedule_work(&fifo->recover.work); |
| 178 | } | 347 | } |
| 179 | 348 | ||
| @@ -211,34 +380,30 @@ static void | |||
| 211 | gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo) | 380 | gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo) |
| 212 | { | 381 | { |
| 213 | struct nvkm_device *device = fifo->base.engine.subdev.device; | 382 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
| 214 | struct gk104_fifo_chan *chan; | 383 | unsigned long flags, engm = 0; |
| 215 | unsigned long flags; | ||
| 216 | u32 engn; | 384 | u32 engn; |
| 217 | 385 | ||
| 386 | /* We need to ACK the SCHED_ERROR here, and prevent it reasserting, | ||
| 387 | * as MMU_FAULT cannot be triggered while it's pending. | ||
| 388 | */ | ||
| 218 | spin_lock_irqsave(&fifo->base.lock, flags); | 389 | spin_lock_irqsave(&fifo->base.lock, flags); |
| 390 | nvkm_mask(device, 0x002140, 0x00000100, 0x00000000); | ||
| 391 | nvkm_wr32(device, 0x002100, 0x00000100); | ||
| 392 | |||
| 219 | for (engn = 0; engn < fifo->engine_nr; engn++) { | 393 | for (engn = 0; engn < fifo->engine_nr; engn++) { |
| 220 | struct nvkm_engine *engine = fifo->engine[engn].engine; | 394 | struct gk104_fifo_engine_status status; |
| 221 | int runl = fifo->engine[engn].runl; | 395 | |
| 222 | u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08)); | 396 | gk104_fifo_engine_status(fifo, engn, &status); |
| 223 | u32 busy = (stat & 0x80000000); | 397 | if (!status.busy || !status.chsw) |
| 224 | u32 next = (stat & 0x0fff0000) >> 16; | ||
| 225 | u32 chsw = (stat & 0x00008000); | ||
| 226 | u32 save = (stat & 0x00004000); | ||
| 227 | u32 load = (stat & 0x00002000); | ||
| 228 | u32 prev = (stat & 0x00000fff); | ||
| 229 | u32 chid = load ? next : prev; | ||
| 230 | (void)save; | ||
| 231 | |||
| 232 | if (!busy || !chsw) | ||
| 233 | continue; | 398 | continue; |
| 234 | 399 | ||
| 235 | list_for_each_entry(chan, &fifo->runlist[runl].chan, head) { | 400 | engm |= BIT(engn); |
| 236 | if (chan->base.chid == chid && engine) { | ||
| 237 | gk104_fifo_recover(fifo, engine, chan); | ||
| 238 | break; | ||
| 239 | } | ||
| 240 | } | ||
| 241 | } | 401 | } |
| 402 | |||
| 403 | for_each_set_bit(engn, &engm, fifo->engine_nr) | ||
| 404 | gk104_fifo_recover_engn(fifo, engn); | ||
| 405 | |||
| 406 | nvkm_mask(device, 0x002140, 0x00000100, 0x00000100); | ||
| 242 | spin_unlock_irqrestore(&fifo->base.lock, flags); | 407 | spin_unlock_irqrestore(&fifo->base.lock, flags); |
| 243 | } | 408 | } |
| 244 | 409 | ||
| @@ -301,6 +466,7 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit) | |||
| 301 | struct nvkm_fifo_chan *chan; | 466 | struct nvkm_fifo_chan *chan; |
| 302 | unsigned long flags; | 467 | unsigned long flags; |
| 303 | char gpcid[8] = "", en[16] = ""; | 468 | char gpcid[8] = "", en[16] = ""; |
| 469 | int engn; | ||
| 304 | 470 | ||
| 305 | er = nvkm_enum_find(fifo->func->fault.reason, reason); | 471 | er = nvkm_enum_find(fifo->func->fault.reason, reason); |
| 306 | eu = nvkm_enum_find(fifo->func->fault.engine, unit); | 472 | eu = nvkm_enum_find(fifo->func->fault.engine, unit); |
| @@ -342,7 +508,8 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit) | |||
| 342 | snprintf(en, sizeof(en), "%s", eu->name); | 508 | snprintf(en, sizeof(en), "%s", eu->name); |
| 343 | } | 509 | } |
| 344 | 510 | ||
| 345 | chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags); | 511 | spin_lock_irqsave(&fifo->base.lock, flags); |
| 512 | chan = nvkm_fifo_chan_inst_locked(&fifo->base, (u64)inst << 12); | ||
| 346 | 513 | ||
| 347 | nvkm_error(subdev, | 514 | nvkm_error(subdev, |
| 348 | "%s fault at %010llx engine %02x [%s] client %02x [%s%s] " | 515 | "%s fault at %010llx engine %02x [%s] client %02x [%s%s] " |
| @@ -353,9 +520,23 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit) | |||
| 353 | (u64)inst << 12, | 520 | (u64)inst << 12, |
| 354 | chan ? chan->object.client->name : "unknown"); | 521 | chan ? chan->object.client->name : "unknown"); |
| 355 | 522 | ||
| 356 | if (engine && chan) | 523 | |
| 357 | gk104_fifo_recover(fifo, engine, (void *)chan); | 524 | /* Kill the channel that caused the fault. */ |
| 358 | nvkm_fifo_chan_put(&fifo->base, flags, &chan); | 525 | if (chan) |
| 526 | gk104_fifo_recover_chan(&fifo->base, chan->chid); | ||
| 527 | |||
| 528 | /* Channel recovery will probably have already done this for the | ||
| 529 | * correct engine(s), but just in case we can't find the channel | ||
| 530 | * information... | ||
| 531 | */ | ||
| 532 | for (engn = 0; engn < fifo->engine_nr && engine; engn++) { | ||
| 533 | if (fifo->engine[engn].engine == engine) { | ||
| 534 | gk104_fifo_recover_engn(fifo, engn); | ||
| 535 | break; | ||
| 536 | } | ||
| 537 | } | ||
| 538 | |||
| 539 | spin_unlock_irqrestore(&fifo->base.lock, flags); | ||
| 359 | } | 540 | } |
| 360 | 541 | ||
| 361 | static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = { | 542 | static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = { |
| @@ -716,6 +897,7 @@ gk104_fifo_ = { | |||
| 716 | .intr = gk104_fifo_intr, | 897 | .intr = gk104_fifo_intr, |
| 717 | .uevent_init = gk104_fifo_uevent_init, | 898 | .uevent_init = gk104_fifo_uevent_init, |
| 718 | .uevent_fini = gk104_fifo_uevent_fini, | 899 | .uevent_fini = gk104_fifo_uevent_fini, |
| 900 | .recover_chan = gk104_fifo_recover_chan, | ||
| 719 | .class_get = gk104_fifo_class_get, | 901 | .class_get = gk104_fifo_class_get, |
| 720 | }; | 902 | }; |
| 721 | 903 | ||
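Most of the new recovery logic keys off gk104_fifo_engine_status(), which decodes the per-engine status register at 0x002640 + engn * 0x08. Gathering the bit layout implied by the masks above into one place (hypothetical macro names; the layout is inferred from this diff, not from documentation):

    #include <linux/bits.h>
    #include <linux/bitfield.h>

    #define ENGSTAT_BUSY     BIT(31)
    #define ENGSTAT_FAULTED  BIT(30)
    #define ENGSTAT_NEXT_TSG BIT(28)          /* next id is a TSG id */
    #define ENGSTAT_NEXT_ID  GENMASK(27, 16)
    #define ENGSTAT_CHSW     BIT(15)          /* channel switch in progress */
    #define ENGSTAT_SAVE     BIT(14)
    #define ENGSTAT_LOAD     BIT(13)
    #define ENGSTAT_PREV_TSG BIT(12)          /* prev id is a TSG id */
    #define ENGSTAT_PREV_ID  GENMASK(11, 0)

    static inline u32 engstat_next_id(u32 stat)
    {
            return FIELD_GET(ENGSTAT_NEXT_ID, stat);
    }

The only genuinely ambiguous case is busy + chsw with both save and load set, where the hardware is mid-switch; there the code asks the engine itself, via nvkm_engine_chsw_load(), whether the incoming or the outgoing context should be treated as current.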
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c index 12d964260a29..f9e0377d3d24 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c | |||
| @@ -32,6 +32,23 @@ | |||
| 32 | #include <nvif/cl906f.h> | 32 | #include <nvif/cl906f.h> |
| 33 | #include <nvif/unpack.h> | 33 | #include <nvif/unpack.h> |
| 34 | 34 | ||
| 35 | int | ||
| 36 | gf100_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type, | ||
| 37 | struct nvkm_event **pevent) | ||
| 38 | { | ||
| 39 | switch (type) { | ||
| 40 | case NV906F_V0_NTFY_NON_STALL_INTERRUPT: | ||
| 41 | *pevent = &chan->fifo->uevent; | ||
| 42 | return 0; | ||
| 43 | case NV906F_V0_NTFY_KILLED: | ||
| 44 | *pevent = &chan->fifo->kevent; | ||
| 45 | return 0; | ||
| 46 | default: | ||
| 47 | break; | ||
| 48 | } | ||
| 49 | return -EINVAL; | ||
| 50 | } | ||
| 51 | |||
| 35 | static u32 | 52 | static u32 |
| 36 | gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine) | 53 | gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine) |
| 37 | { | 54 | { |
| @@ -184,7 +201,7 @@ gf100_fifo_gpfifo_func = { | |||
| 184 | .dtor = gf100_fifo_gpfifo_dtor, | 201 | .dtor = gf100_fifo_gpfifo_dtor, |
| 185 | .init = gf100_fifo_gpfifo_init, | 202 | .init = gf100_fifo_gpfifo_init, |
| 186 | .fini = gf100_fifo_gpfifo_fini, | 203 | .fini = gf100_fifo_gpfifo_fini, |
| 187 | .ntfy = g84_fifo_chan_ntfy, | 204 | .ntfy = gf100_fifo_chan_ntfy, |
| 188 | .engine_ctor = gf100_fifo_gpfifo_engine_ctor, | 205 | .engine_ctor = gf100_fifo_gpfifo_engine_ctor, |
| 189 | .engine_dtor = gf100_fifo_gpfifo_engine_dtor, | 206 | .engine_dtor = gf100_fifo_gpfifo_engine_dtor, |
| 190 | .engine_init = gf100_fifo_gpfifo_engine_init, | 207 | .engine_init = gf100_fifo_gpfifo_engine_init, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c index a2df4f3e7763..8abf6f8ef445 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c | |||
| @@ -50,6 +50,7 @@ gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan) | |||
| 50 | ) < 0) { | 50 | ) < 0) { |
| 51 | nvkm_error(subdev, "channel %d [%s] kick timeout\n", | 51 | nvkm_error(subdev, "channel %d [%s] kick timeout\n", |
| 52 | chan->base.chid, client->name); | 52 | chan->base.chid, client->name); |
| 53 | nvkm_fifo_recover_chan(&fifo->base, chan->base.chid); | ||
| 53 | ret = -ETIMEDOUT; | 54 | ret = -ETIMEDOUT; |
| 54 | } | 55 | } |
| 55 | mutex_unlock(&subdev->mutex); | 56 | mutex_unlock(&subdev->mutex); |
| @@ -213,7 +214,7 @@ gk104_fifo_gpfifo_func = { | |||
| 213 | .dtor = gk104_fifo_gpfifo_dtor, | 214 | .dtor = gk104_fifo_gpfifo_dtor, |
| 214 | .init = gk104_fifo_gpfifo_init, | 215 | .init = gk104_fifo_gpfifo_init, |
| 215 | .fini = gk104_fifo_gpfifo_fini, | 216 | .fini = gk104_fifo_gpfifo_fini, |
| 216 | .ntfy = g84_fifo_chan_ntfy, | 217 | .ntfy = gf100_fifo_chan_ntfy, |
| 217 | .engine_ctor = gk104_fifo_gpfifo_engine_ctor, | 218 | .engine_ctor = gk104_fifo_gpfifo_engine_ctor, |
| 218 | .engine_dtor = gk104_fifo_gpfifo_engine_dtor, | 219 | .engine_dtor = gk104_fifo_gpfifo_engine_dtor, |
| 219 | .engine_init = gk104_fifo_gpfifo_engine_init, | 220 | .engine_init = gk104_fifo_gpfifo_engine_init, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h index f6dfb37d9429..f889b13b5e41 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h | |||
| @@ -6,6 +6,12 @@ | |||
| 6 | int nvkm_fifo_ctor(const struct nvkm_fifo_func *, struct nvkm_device *, | 6 | int nvkm_fifo_ctor(const struct nvkm_fifo_func *, struct nvkm_device *, |
| 7 | int index, int nr, struct nvkm_fifo *); | 7 | int index, int nr, struct nvkm_fifo *); |
| 8 | void nvkm_fifo_uevent(struct nvkm_fifo *); | 8 | void nvkm_fifo_uevent(struct nvkm_fifo *); |
| 9 | void nvkm_fifo_cevent(struct nvkm_fifo *); | ||
| 10 | void nvkm_fifo_kevent(struct nvkm_fifo *, int chid); | ||
| 11 | void nvkm_fifo_recover_chan(struct nvkm_fifo *, int chid); | ||
| 12 | |||
| 13 | struct nvkm_fifo_chan * | ||
| 14 | nvkm_fifo_chan_inst_locked(struct nvkm_fifo *, u64 inst); | ||
| 9 | 15 | ||
| 10 | struct nvkm_fifo_chan_oclass; | 16 | struct nvkm_fifo_chan_oclass; |
| 11 | struct nvkm_fifo_func { | 17 | struct nvkm_fifo_func { |
| @@ -18,6 +24,7 @@ struct nvkm_fifo_func { | |||
| 18 | void (*start)(struct nvkm_fifo *, unsigned long *); | 24 | void (*start)(struct nvkm_fifo *, unsigned long *); |
| 19 | void (*uevent_init)(struct nvkm_fifo *); | 25 | void (*uevent_init)(struct nvkm_fifo *); |
| 20 | void (*uevent_fini)(struct nvkm_fifo *); | 26 | void (*uevent_fini)(struct nvkm_fifo *); |
| 27 | void (*recover_chan)(struct nvkm_fifo *, int chid); | ||
| 21 | int (*class_get)(struct nvkm_fifo *, int index, | 28 | int (*class_get)(struct nvkm_fifo *, int index, |
| 22 | const struct nvkm_fifo_chan_oclass **); | 29 | const struct nvkm_fifo_chan_oclass **); |
| 23 | const struct nvkm_fifo_chan_oclass *chan[]; | 30 | const struct nvkm_fifo_chan_oclass *chan[]; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c index 467065d1b4e6..cd8cf6f7024c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c | |||
| @@ -25,6 +25,15 @@ | |||
| 25 | 25 | ||
| 26 | #include <engine/fifo.h> | 26 | #include <engine/fifo.h> |
| 27 | 27 | ||
| 28 | static bool | ||
| 29 | nvkm_gr_chsw_load(struct nvkm_engine *engine) | ||
| 30 | { | ||
| 31 | struct nvkm_gr *gr = nvkm_gr(engine); | ||
| 32 | if (gr->func->chsw_load) | ||
| 33 | return gr->func->chsw_load(gr); | ||
| 34 | return false; | ||
| 35 | } | ||
| 36 | |||
| 28 | static void | 37 | static void |
| 29 | nvkm_gr_tile(struct nvkm_engine *engine, int region, struct nvkm_fb_tile *tile) | 38 | nvkm_gr_tile(struct nvkm_engine *engine, int region, struct nvkm_fb_tile *tile) |
| 30 | { | 39 | { |
| @@ -106,6 +115,15 @@ nvkm_gr_init(struct nvkm_engine *engine) | |||
| 106 | return gr->func->init(gr); | 115 | return gr->func->init(gr); |
| 107 | } | 116 | } |
| 108 | 117 | ||
| 118 | static int | ||
| 119 | nvkm_gr_fini(struct nvkm_engine *engine, bool suspend) | ||
| 120 | { | ||
| 121 | struct nvkm_gr *gr = nvkm_gr(engine); | ||
| 122 | if (gr->func->fini) | ||
| 123 | return gr->func->fini(gr, suspend); | ||
| 124 | return 0; | ||
| 125 | } | ||
| 126 | |||
| 109 | static void * | 127 | static void * |
| 110 | nvkm_gr_dtor(struct nvkm_engine *engine) | 128 | nvkm_gr_dtor(struct nvkm_engine *engine) |
| 111 | { | 129 | { |
| @@ -120,8 +138,10 @@ nvkm_gr = { | |||
| 120 | .dtor = nvkm_gr_dtor, | 138 | .dtor = nvkm_gr_dtor, |
| 121 | .oneinit = nvkm_gr_oneinit, | 139 | .oneinit = nvkm_gr_oneinit, |
| 122 | .init = nvkm_gr_init, | 140 | .init = nvkm_gr_init, |
| 141 | .fini = nvkm_gr_fini, | ||
| 123 | .intr = nvkm_gr_intr, | 142 | .intr = nvkm_gr_intr, |
| 124 | .tile = nvkm_gr_tile, | 143 | .tile = nvkm_gr_tile, |
| 144 | .chsw_load = nvkm_gr_chsw_load, | ||
| 125 | .fifo.cclass = nvkm_gr_cclass_new, | 145 | .fifo.cclass = nvkm_gr_cclass_new, |
| 126 | .fifo.sclass = nvkm_gr_oclass_get, | 146 | .fifo.sclass = nvkm_gr_oclass_get, |
| 127 | }; | 147 | }; |
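The new fini and chsw_load entries are optional hooks: the nvkm_gr wrappers only call into the per-chip implementation when the pointer is populated, so drivers that never set them keep their old behaviour. A minimal sketch of the optional-hook dispatch, with hypothetical obj/obj_ops types:

    #include <linux/types.h>

    struct obj;

    struct obj_ops {
            int  (*fini)(struct obj *o, bool suspend); /* optional */
            bool (*chsw_load)(struct obj *o);          /* optional */
    };

    struct obj {
            const struct obj_ops *ops;
    };

    static int obj_fini(struct obj *o, bool suspend)
    {
            if (o->ops->fini)
                    return o->ops->fini(o, suspend);
            return 0; /* hook absent: nothing to tear down */
    }

    static bool obj_chsw_load(struct obj *o)
    {
            if (o->ops->chsw_load)
                    return o->ops->chsw_load(o);
            return false; /* conservative default: no switch pending */
    }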
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c index ce913300539f..da1ba74682b4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c | |||
| @@ -25,6 +25,8 @@ | |||
| 25 | 25 | ||
| 26 | #include <subdev/timer.h> | 26 | #include <subdev/timer.h> |
| 27 | 27 | ||
| 28 | #include <nvif/class.h> | ||
| 29 | |||
| 28 | static const struct nvkm_bitfield nv50_gr_status[] = { | 30 | static const struct nvkm_bitfield nv50_gr_status[] = { |
| 29 | { 0x00000001, "BUSY" }, /* set when any bit is set */ | 31 | { 0x00000001, "BUSY" }, /* set when any bit is set */ |
| 30 | { 0x00000002, "DISPATCH" }, | 32 | { 0x00000002, "DISPATCH" }, |
| @@ -180,11 +182,11 @@ g84_gr = { | |||
| 180 | .tlb_flush = g84_gr_tlb_flush, | 182 | .tlb_flush = g84_gr_tlb_flush, |
| 181 | .units = nv50_gr_units, | 183 | .units = nv50_gr_units, |
| 182 | .sclass = { | 184 | .sclass = { |
| 183 | { -1, -1, 0x0030, &nv50_gr_object }, | 185 | { -1, -1, NV_NULL_CLASS, &nv50_gr_object }, |
| 184 | { -1, -1, 0x502d, &nv50_gr_object }, | 186 | { -1, -1, NV50_TWOD, &nv50_gr_object }, |
| 185 | { -1, -1, 0x5039, &nv50_gr_object }, | 187 | { -1, -1, NV50_MEMORY_TO_MEMORY_FORMAT, &nv50_gr_object }, |
| 186 | { -1, -1, 0x50c0, &nv50_gr_object }, | 188 | { -1, -1, NV50_COMPUTE, &nv50_gr_object }, |
| 187 | { -1, -1, 0x8297, &nv50_gr_object }, | 189 | { -1, -1, G82_TESLA, &nv50_gr_object }, |
| 188 | {} | 190 | {} |
| 189 | } | 191 | } |
| 190 | }; | 192 | }; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index f65a5b0a1a4d..f9acb8a944d2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | |||
| @@ -702,6 +702,22 @@ gf100_gr_pack_mmio[] = { | |||
| 702 | * PGRAPH engine/subdev functions | 702 | * PGRAPH engine/subdev functions |
| 703 | ******************************************************************************/ | 703 | ******************************************************************************/ |
| 704 | 704 | ||
| 705 | static bool | ||
| 706 | gf100_gr_chsw_load(struct nvkm_gr *base) | ||
| 707 | { | ||
| 708 | struct gf100_gr *gr = gf100_gr(base); | ||
| 709 | if (!gr->firmware) { | ||
| 710 | u32 trace = nvkm_rd32(gr->base.engine.subdev.device, 0x40981c); | ||
| 711 | if (trace & 0x00000040) | ||
| 712 | return true; | ||
| 713 | } else { | ||
| 714 | u32 mthd = nvkm_rd32(gr->base.engine.subdev.device, 0x409808); | ||
| 715 | if (mthd & 0x00080000) | ||
| 716 | return true; | ||
| 717 | } | ||
| 718 | return false; | ||
| 719 | } | ||
| 720 | |||
| 705 | int | 721 | int |
| 706 | gf100_gr_rops(struct gf100_gr *gr) | 722 | gf100_gr_rops(struct gf100_gr *gr) |
| 707 | { | 723 | { |
| @@ -1136,7 +1152,7 @@ gf100_gr_trap_intr(struct gf100_gr *gr) | |||
| 1136 | if (trap & 0x00000008) { | 1152 | if (trap & 0x00000008) { |
| 1137 | u32 stat = nvkm_rd32(device, 0x408030); | 1153 | u32 stat = nvkm_rd32(device, 0x408030); |
| 1138 | 1154 | ||
| 1139 | nvkm_snprintbf(error, sizeof(error), gf100_m2mf_error, | 1155 | nvkm_snprintbf(error, sizeof(error), gf100_ccache_error, |
| 1140 | stat & 0x3fffffff); | 1156 | stat & 0x3fffffff); |
| 1141 | nvkm_error(subdev, "CCACHE %08x [%s]\n", stat, error); | 1157 | nvkm_error(subdev, "CCACHE %08x [%s]\n", stat, error); |
| 1142 | nvkm_wr32(device, 0x408030, 0xc0000000); | 1158 | nvkm_wr32(device, 0x408030, 0xc0000000); |
| @@ -1391,26 +1407,11 @@ gf100_gr_intr(struct nvkm_gr *base) | |||
| 1391 | } | 1407 | } |
| 1392 | 1408 | ||
| 1393 | static void | 1409 | static void |
| 1394 | gf100_gr_init_fw(struct gf100_gr *gr, u32 fuc_base, | 1410 | gf100_gr_init_fw(struct nvkm_falcon *falcon, |
| 1395 | struct gf100_gr_fuc *code, struct gf100_gr_fuc *data) | 1411 | struct gf100_gr_fuc *code, struct gf100_gr_fuc *data) |
| 1396 | { | 1412 | { |
| 1397 | struct nvkm_device *device = gr->base.engine.subdev.device; | 1413 | nvkm_falcon_load_dmem(falcon, data->data, 0x0, data->size, 0); |
| 1398 | int i; | 1414 | nvkm_falcon_load_imem(falcon, code->data, 0x0, code->size, 0, 0, false); |
| 1399 | |||
| 1400 | nvkm_wr32(device, fuc_base + 0x01c0, 0x01000000); | ||
| 1401 | for (i = 0; i < data->size / 4; i++) | ||
| 1402 | nvkm_wr32(device, fuc_base + 0x01c4, data->data[i]); | ||
| 1403 | |||
| 1404 | nvkm_wr32(device, fuc_base + 0x0180, 0x01000000); | ||
| 1405 | for (i = 0; i < code->size / 4; i++) { | ||
| 1406 | if ((i & 0x3f) == 0) | ||
| 1407 | nvkm_wr32(device, fuc_base + 0x0188, i >> 6); | ||
| 1408 | nvkm_wr32(device, fuc_base + 0x0184, code->data[i]); | ||
| 1409 | } | ||
| 1410 | |||
| 1411 | /* code must be padded to 0x40 words */ | ||
| 1412 | for (; i & 0x3f; i++) | ||
| 1413 | nvkm_wr32(device, fuc_base + 0x0184, 0); | ||
| 1414 | } | 1415 | } |
| 1415 | 1416 | ||
| 1416 | static void | 1417 | static void |
| @@ -1455,162 +1456,149 @@ gf100_gr_init_csdata(struct gf100_gr *gr, | |||
| 1455 | nvkm_wr32(device, falcon + 0x01c4, star + 4); | 1456 | nvkm_wr32(device, falcon + 0x01c4, star + 4); |
| 1456 | } | 1457 | } |
| 1457 | 1458 | ||
| 1458 | int | 1459 | /* Initialize context from external (secure or not) firmware */ |
| 1459 | gf100_gr_init_ctxctl(struct gf100_gr *gr) | 1460 | static int |
| 1461 | gf100_gr_init_ctxctl_ext(struct gf100_gr *gr) | ||
| 1460 | { | 1462 | { |
| 1461 | const struct gf100_grctx_func *grctx = gr->func->grctx; | ||
| 1462 | struct nvkm_subdev *subdev = &gr->base.engine.subdev; | 1463 | struct nvkm_subdev *subdev = &gr->base.engine.subdev; |
| 1463 | struct nvkm_device *device = subdev->device; | 1464 | struct nvkm_device *device = subdev->device; |
| 1464 | struct nvkm_secboot *sb = device->secboot; | 1465 | struct nvkm_secboot *sb = device->secboot; |
| 1465 | int i; | ||
| 1466 | int ret = 0; | 1466 | int ret = 0; |
| 1467 | 1467 | ||
| 1468 | if (gr->firmware) { | 1468 | /* load fuc microcode */ |
| 1469 | /* load fuc microcode */ | 1469 | nvkm_mc_unk260(device, 0); |
| 1470 | nvkm_mc_unk260(device, 0); | ||
| 1471 | |||
| 1472 | /* securely-managed falcons must be reset using secure boot */ | ||
| 1473 | if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS)) | ||
| 1474 | ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS); | ||
| 1475 | else | ||
| 1476 | gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c, | ||
| 1477 | &gr->fuc409d); | ||
| 1478 | if (ret) | ||
| 1479 | return ret; | ||
| 1480 | 1470 | ||
| 1481 | if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS)) | 1471 | /* securely-managed falcons must be reset using secure boot */ |
| 1482 | ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS); | 1472 | if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS)) |
| 1483 | else | 1473 | ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS); |
| 1484 | gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac, | 1474 | else |
| 1485 | &gr->fuc41ad); | 1475 | gf100_gr_init_fw(gr->fecs, &gr->fuc409c, &gr->fuc409d); |
| 1486 | if (ret) | 1476 | if (ret) |
| 1487 | return ret; | 1477 | return ret; |
| 1488 | 1478 | ||
| 1489 | nvkm_mc_unk260(device, 1); | 1479 | if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS)) |
| 1490 | 1480 | ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS); | |
| 1491 | /* start both of them running */ | 1481 | else |
| 1492 | nvkm_wr32(device, 0x409840, 0xffffffff); | 1482 | gf100_gr_init_fw(gr->gpccs, &gr->fuc41ac, &gr->fuc41ad); |
| 1493 | nvkm_wr32(device, 0x41a10c, 0x00000000); | 1483 | if (ret) |
| 1494 | nvkm_wr32(device, 0x40910c, 0x00000000); | 1484 | return ret; |
| 1495 | 1485 | ||
| 1496 | if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS)) | 1486 | nvkm_mc_unk260(device, 1); |
| 1497 | nvkm_secboot_start(sb, NVKM_SECBOOT_FALCON_GPCCS); | 1487 | |
| 1498 | else | 1488 | /* start both of them running */ |
| 1499 | nvkm_wr32(device, 0x41a100, 0x00000002); | 1489 | nvkm_wr32(device, 0x409840, 0xffffffff); |
| 1500 | if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS)) | 1490 | nvkm_wr32(device, 0x41a10c, 0x00000000); |
| 1501 | nvkm_secboot_start(sb, NVKM_SECBOOT_FALCON_FECS); | 1491 | nvkm_wr32(device, 0x40910c, 0x00000000); |
| 1502 | else | 1492 | |
| 1503 | nvkm_wr32(device, 0x409100, 0x00000002); | 1493 | nvkm_falcon_start(gr->gpccs); |
| 1504 | if (nvkm_msec(device, 2000, | 1494 | nvkm_falcon_start(gr->fecs); |
| 1505 | if (nvkm_rd32(device, 0x409800) & 0x00000001) | ||
| 1506 | break; | ||
| 1507 | ) < 0) | ||
| 1508 | return -EBUSY; | ||
| 1509 | 1495 | ||
| 1510 | nvkm_wr32(device, 0x409840, 0xffffffff); | 1496 | if (nvkm_msec(device, 2000, |
| 1511 | nvkm_wr32(device, 0x409500, 0x7fffffff); | 1497 | if (nvkm_rd32(device, 0x409800) & 0x00000001) |
| 1512 | nvkm_wr32(device, 0x409504, 0x00000021); | 1498 | break; |
| 1499 | ) < 0) | ||
| 1500 | return -EBUSY; | ||
| 1501 | |||
| 1502 | nvkm_wr32(device, 0x409840, 0xffffffff); | ||
| 1503 | nvkm_wr32(device, 0x409500, 0x7fffffff); | ||
| 1504 | nvkm_wr32(device, 0x409504, 0x00000021); | ||
| 1505 | |||
| 1506 | nvkm_wr32(device, 0x409840, 0xffffffff); | ||
| 1507 | nvkm_wr32(device, 0x409500, 0x00000000); | ||
| 1508 | nvkm_wr32(device, 0x409504, 0x00000010); | ||
| 1509 | if (nvkm_msec(device, 2000, | ||
| 1510 | if ((gr->size = nvkm_rd32(device, 0x409800))) | ||
| 1511 | break; | ||
| 1512 | ) < 0) | ||
| 1513 | return -EBUSY; | ||
| 1514 | |||
| 1515 | nvkm_wr32(device, 0x409840, 0xffffffff); | ||
| 1516 | nvkm_wr32(device, 0x409500, 0x00000000); | ||
| 1517 | nvkm_wr32(device, 0x409504, 0x00000016); | ||
| 1518 | if (nvkm_msec(device, 2000, | ||
| 1519 | if (nvkm_rd32(device, 0x409800)) | ||
| 1520 | break; | ||
| 1521 | ) < 0) | ||
| 1522 | return -EBUSY; | ||
| 1523 | |||
| 1524 | nvkm_wr32(device, 0x409840, 0xffffffff); | ||
| 1525 | nvkm_wr32(device, 0x409500, 0x00000000); | ||
| 1526 | nvkm_wr32(device, 0x409504, 0x00000025); | ||
| 1527 | if (nvkm_msec(device, 2000, | ||
| 1528 | if (nvkm_rd32(device, 0x409800)) | ||
| 1529 | break; | ||
| 1530 | ) < 0) | ||
| 1531 | return -EBUSY; | ||
| 1513 | 1532 | ||
| 1514 | nvkm_wr32(device, 0x409840, 0xffffffff); | 1533 | if (device->chipset >= 0xe0) { |
| 1515 | nvkm_wr32(device, 0x409500, 0x00000000); | 1534 | nvkm_wr32(device, 0x409800, 0x00000000); |
| 1516 | nvkm_wr32(device, 0x409504, 0x00000010); | 1535 | nvkm_wr32(device, 0x409500, 0x00000001); |
| 1536 | nvkm_wr32(device, 0x409504, 0x00000030); | ||
| 1517 | if (nvkm_msec(device, 2000, | 1537 | if (nvkm_msec(device, 2000, |
| 1518 | if ((gr->size = nvkm_rd32(device, 0x409800))) | 1538 | if (nvkm_rd32(device, 0x409800)) |
| 1519 | break; | 1539 | break; |
| 1520 | ) < 0) | 1540 | ) < 0) |
| 1521 | return -EBUSY; | 1541 | return -EBUSY; |
| 1522 | 1542 | ||
| 1523 | nvkm_wr32(device, 0x409840, 0xffffffff); | 1543 | nvkm_wr32(device, 0x409810, 0xb00095c8); |
| 1524 | nvkm_wr32(device, 0x409500, 0x00000000); | 1544 | nvkm_wr32(device, 0x409800, 0x00000000); |
| 1525 | nvkm_wr32(device, 0x409504, 0x00000016); | 1545 | nvkm_wr32(device, 0x409500, 0x00000001); |
| 1546 | nvkm_wr32(device, 0x409504, 0x00000031); | ||
| 1526 | if (nvkm_msec(device, 2000, | 1547 | if (nvkm_msec(device, 2000, |
| 1527 | if (nvkm_rd32(device, 0x409800)) | 1548 | if (nvkm_rd32(device, 0x409800)) |
| 1528 | break; | 1549 | break; |
| 1529 | ) < 0) | 1550 | ) < 0) |
| 1530 | return -EBUSY; | 1551 | return -EBUSY; |
| 1531 | 1552 | ||
| 1532 | nvkm_wr32(device, 0x409840, 0xffffffff); | 1553 | nvkm_wr32(device, 0x409810, 0x00080420); |
| 1533 | nvkm_wr32(device, 0x409500, 0x00000000); | 1554 | nvkm_wr32(device, 0x409800, 0x00000000); |
| 1534 | nvkm_wr32(device, 0x409504, 0x00000025); | 1555 | nvkm_wr32(device, 0x409500, 0x00000001); |
| 1556 | nvkm_wr32(device, 0x409504, 0x00000032); | ||
| 1535 | if (nvkm_msec(device, 2000, | 1557 | if (nvkm_msec(device, 2000, |
| 1536 | if (nvkm_rd32(device, 0x409800)) | 1558 | if (nvkm_rd32(device, 0x409800)) |
| 1537 | break; | 1559 | break; |
| 1538 | ) < 0) | 1560 | ) < 0) |
| 1539 | return -EBUSY; | 1561 | return -EBUSY; |
| 1540 | 1562 | ||
| 1541 | if (device->chipset >= 0xe0) { | 1563 | nvkm_wr32(device, 0x409614, 0x00000070); |
| 1542 | nvkm_wr32(device, 0x409800, 0x00000000); | 1564 | nvkm_wr32(device, 0x409614, 0x00000770); |
| 1543 | nvkm_wr32(device, 0x409500, 0x00000001); | 1565 | nvkm_wr32(device, 0x40802c, 0x00000001); |
| 1544 | nvkm_wr32(device, 0x409504, 0x00000030); | 1566 | } |
| 1545 | if (nvkm_msec(device, 2000, | ||
| 1546 | if (nvkm_rd32(device, 0x409800)) | ||
| 1547 | break; | ||
| 1548 | ) < 0) | ||
| 1549 | return -EBUSY; | ||
| 1550 | |||
| 1551 | nvkm_wr32(device, 0x409810, 0xb00095c8); | ||
| 1552 | nvkm_wr32(device, 0x409800, 0x00000000); | ||
| 1553 | nvkm_wr32(device, 0x409500, 0x00000001); | ||
| 1554 | nvkm_wr32(device, 0x409504, 0x00000031); | ||
| 1555 | if (nvkm_msec(device, 2000, | ||
| 1556 | if (nvkm_rd32(device, 0x409800)) | ||
| 1557 | break; | ||
| 1558 | ) < 0) | ||
| 1559 | return -EBUSY; | ||
| 1560 | |||
| 1561 | nvkm_wr32(device, 0x409810, 0x00080420); | ||
| 1562 | nvkm_wr32(device, 0x409800, 0x00000000); | ||
| 1563 | nvkm_wr32(device, 0x409500, 0x00000001); | ||
| 1564 | nvkm_wr32(device, 0x409504, 0x00000032); | ||
| 1565 | if (nvkm_msec(device, 2000, | ||
| 1566 | if (nvkm_rd32(device, 0x409800)) | ||
| 1567 | break; | ||
| 1568 | ) < 0) | ||
| 1569 | return -EBUSY; | ||
| 1570 | 1567 | ||
| 1571 | nvkm_wr32(device, 0x409614, 0x00000070); | 1568 | if (gr->data == NULL) { |
| 1572 | nvkm_wr32(device, 0x409614, 0x00000770); | 1569 | int ret = gf100_grctx_generate(gr); |
| 1573 | nvkm_wr32(device, 0x40802c, 0x00000001); | 1570 | if (ret) { |
| 1571 | nvkm_error(subdev, "failed to construct context\n"); | ||
| 1572 | return ret; | ||
| 1574 | } | 1573 | } |
| 1574 | } | ||
| 1575 | 1575 | ||
| 1576 | if (gr->data == NULL) { | 1576 | return 0; |
| 1577 | int ret = gf100_grctx_generate(gr); | 1577 | } |
| 1578 | if (ret) { | 1578 | |
| 1579 | nvkm_error(subdev, "failed to construct context\n"); | 1579 | static int |
| 1580 | return ret; | 1580 | gf100_gr_init_ctxctl_int(struct gf100_gr *gr) |
| 1581 | } | 1581 | { |
| 1582 | } | 1582 | const struct gf100_grctx_func *grctx = gr->func->grctx; |
| 1583 | struct nvkm_subdev *subdev = &gr->base.engine.subdev; | ||
| 1584 | struct nvkm_device *device = subdev->device; | ||
| 1583 | 1585 | ||
| 1584 | return 0; | ||
| 1585 | } else | ||
| 1586 | if (!gr->func->fecs.ucode) { | 1586 | if (!gr->func->fecs.ucode) { |
| 1587 | return -ENOSYS; | 1587 | return -ENOSYS; |
| 1588 | } | 1588 | } |
| 1589 | 1589 | ||
| 1590 | /* load HUB microcode */ | 1590 | /* load HUB microcode */ |
| 1591 | nvkm_mc_unk260(device, 0); | 1591 | nvkm_mc_unk260(device, 0); |
| 1592 | nvkm_wr32(device, 0x4091c0, 0x01000000); | 1592 | nvkm_falcon_load_dmem(gr->fecs, gr->func->fecs.ucode->data.data, 0x0, |
| 1593 | for (i = 0; i < gr->func->fecs.ucode->data.size / 4; i++) | 1593 | gr->func->fecs.ucode->data.size, 0); |
| 1594 | nvkm_wr32(device, 0x4091c4, gr->func->fecs.ucode->data.data[i]); | 1594 | nvkm_falcon_load_imem(gr->fecs, gr->func->fecs.ucode->code.data, 0x0, |
| 1595 | 1595 | gr->func->fecs.ucode->code.size, 0, 0, false); | |
| 1596 | nvkm_wr32(device, 0x409180, 0x01000000); | ||
| 1597 | for (i = 0; i < gr->func->fecs.ucode->code.size / 4; i++) { | ||
| 1598 | if ((i & 0x3f) == 0) | ||
| 1599 | nvkm_wr32(device, 0x409188, i >> 6); | ||
| 1600 | nvkm_wr32(device, 0x409184, gr->func->fecs.ucode->code.data[i]); | ||
| 1601 | } | ||
| 1602 | 1596 | ||
| 1603 | /* load GPC microcode */ | 1597 | /* load GPC microcode */ |
| 1604 | nvkm_wr32(device, 0x41a1c0, 0x01000000); | 1598 | nvkm_falcon_load_dmem(gr->gpccs, gr->func->gpccs.ucode->data.data, 0x0, |
| 1605 | for (i = 0; i < gr->func->gpccs.ucode->data.size / 4; i++) | 1599 | gr->func->gpccs.ucode->data.size, 0); |
| 1606 | nvkm_wr32(device, 0x41a1c4, gr->func->gpccs.ucode->data.data[i]); | 1600 | nvkm_falcon_load_imem(gr->gpccs, gr->func->gpccs.ucode->code.data, 0x0, |
| 1607 | 1601 | gr->func->gpccs.ucode->code.size, 0, 0, false); | |
| 1608 | nvkm_wr32(device, 0x41a180, 0x01000000); | ||
| 1609 | for (i = 0; i < gr->func->gpccs.ucode->code.size / 4; i++) { | ||
| 1610 | if ((i & 0x3f) == 0) | ||
| 1611 | nvkm_wr32(device, 0x41a188, i >> 6); | ||
| 1612 | nvkm_wr32(device, 0x41a184, gr->func->gpccs.ucode->code.data[i]); | ||
| 1613 | } | ||
| 1614 | nvkm_mc_unk260(device, 1); | 1602 | nvkm_mc_unk260(device, 1); |
| 1615 | 1603 | ||
| 1616 | /* load register lists */ | 1604 | /* load register lists */ |
| @@ -1642,6 +1630,19 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr) | |||
| 1642 | return 0; | 1630 | return 0; |
| 1643 | } | 1631 | } |
| 1644 | 1632 | ||
| 1633 | int | ||
| 1634 | gf100_gr_init_ctxctl(struct gf100_gr *gr) | ||
| 1635 | { | ||
| 1636 | int ret; | ||
| 1637 | |||
| 1638 | if (gr->firmware) | ||
| 1639 | ret = gf100_gr_init_ctxctl_ext(gr); | ||
| 1640 | else | ||
| 1641 | ret = gf100_gr_init_ctxctl_int(gr); | ||
| 1642 | |||
| 1643 | return ret; | ||
| 1644 | } | ||
| 1645 | |||
| 1645 | static int | 1646 | static int |
| 1646 | gf100_gr_oneinit(struct nvkm_gr *base) | 1647 | gf100_gr_oneinit(struct nvkm_gr *base) |
| 1647 | { | 1648 | { |
| @@ -1711,10 +1712,32 @@ static int | |||
| 1711 | gf100_gr_init_(struct nvkm_gr *base) | 1712 | gf100_gr_init_(struct nvkm_gr *base) |
| 1712 | { | 1713 | { |
| 1713 | struct gf100_gr *gr = gf100_gr(base); | 1714 | struct gf100_gr *gr = gf100_gr(base); |
| 1715 | struct nvkm_subdev *subdev = &base->engine.subdev; | ||
| 1716 | int ret; | ||
| 1717 | |||
| 1714 | nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false); | 1718 | nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false); |
| 1719 | |||
| 1720 | ret = nvkm_falcon_get(gr->fecs, subdev); | ||
| 1721 | if (ret) | ||
| 1722 | return ret; | ||
| 1723 | |||
| 1724 | ret = nvkm_falcon_get(gr->gpccs, subdev); | ||
| 1725 | if (ret) | ||
| 1726 | return ret; | ||
| 1727 | |||
| 1715 | return gr->func->init(gr); | 1728 | return gr->func->init(gr); |
| 1716 | } | 1729 | } |
| 1717 | 1730 | ||
| 1731 | static int | ||
| 1732 | gf100_gr_fini_(struct nvkm_gr *base, bool suspend) | ||
| 1733 | { | ||
| 1734 | struct gf100_gr *gr = gf100_gr(base); | ||
| 1735 | struct nvkm_subdev *subdev = &gr->base.engine.subdev; | ||
| 1736 | nvkm_falcon_put(gr->gpccs, subdev); | ||
| 1737 | nvkm_falcon_put(gr->fecs, subdev); | ||
| 1738 | return 0; | ||
| 1739 | } | ||
| 1740 | |||
| 1718 | void | 1741 | void |
| 1719 | gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc) | 1742 | gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc) |
| 1720 | { | 1743 | { |
| @@ -1737,6 +1760,9 @@ gf100_gr_dtor(struct nvkm_gr *base) | |||
| 1737 | gr->func->dtor(gr); | 1760 | gr->func->dtor(gr); |
| 1738 | kfree(gr->data); | 1761 | kfree(gr->data); |
| 1739 | 1762 | ||
| 1763 | nvkm_falcon_del(&gr->gpccs); | ||
| 1764 | nvkm_falcon_del(&gr->fecs); | ||
| 1765 | |||
| 1740 | gf100_gr_dtor_fw(&gr->fuc409c); | 1766 | gf100_gr_dtor_fw(&gr->fuc409c); |
| 1741 | gf100_gr_dtor_fw(&gr->fuc409d); | 1767 | gf100_gr_dtor_fw(&gr->fuc409d); |
| 1742 | gf100_gr_dtor_fw(&gr->fuc41ac); | 1768 | gf100_gr_dtor_fw(&gr->fuc41ac); |
| @@ -1755,10 +1781,12 @@ gf100_gr_ = { | |||
| 1755 | .dtor = gf100_gr_dtor, | 1781 | .dtor = gf100_gr_dtor, |
| 1756 | .oneinit = gf100_gr_oneinit, | 1782 | .oneinit = gf100_gr_oneinit, |
| 1757 | .init = gf100_gr_init_, | 1783 | .init = gf100_gr_init_, |
| 1784 | .fini = gf100_gr_fini_, | ||
| 1758 | .intr = gf100_gr_intr, | 1785 | .intr = gf100_gr_intr, |
| 1759 | .units = gf100_gr_units, | 1786 | .units = gf100_gr_units, |
| 1760 | .chan_new = gf100_gr_chan_new, | 1787 | .chan_new = gf100_gr_chan_new, |
| 1761 | .object_get = gf100_gr_object_get, | 1788 | .object_get = gf100_gr_object_get, |
| 1789 | .chsw_load = gf100_gr_chsw_load, | ||
| 1762 | }; | 1790 | }; |
| 1763 | 1791 | ||
| 1764 | int | 1792 | int |
| @@ -1828,6 +1856,7 @@ int | |||
| 1828 | gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device, | 1856 | gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device, |
| 1829 | int index, struct gf100_gr *gr) | 1857 | int index, struct gf100_gr *gr) |
| 1830 | { | 1858 | { |
| 1859 | struct nvkm_subdev *subdev = &gr->base.engine.subdev; | ||
| 1831 | int ret; | 1860 | int ret; |
| 1832 | 1861 | ||
| 1833 | gr->func = func; | 1862 | gr->func = func; |
| @@ -1840,7 +1869,11 @@ gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device, | |||
| 1840 | if (ret) | 1869 | if (ret) |
| 1841 | return ret; | 1870 | return ret; |
| 1842 | 1871 | ||
| 1843 | return 0; | 1872 | ret = nvkm_falcon_v1_new(subdev, "FECS", 0x409000, &gr->fecs); |
| 1873 | if (ret) | ||
| 1874 | return ret; | ||
| 1875 | |||
| 1876 | return nvkm_falcon_v1_new(subdev, "GPCCS", 0x41a000, &gr->gpccs); | ||
| 1844 | } | 1877 | } |
| 1845 | 1878 | ||
| 1846 | int | 1879 | int |
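gf100_gr_init_() now takes references on both falcons before initialising the engine, and gf100_gr_fini_() releases them in reverse order. As written, the init hunk returns without releasing FECS when the GPCCS get fails; assuming nvkm_falcon_get()/nvkm_falcon_put() pair like acquire/release, a sketch with the unwind included (gr_falcons_get is a hypothetical wrapper around the calls in the diff):

    static int gr_falcons_get(struct gf100_gr *gr, struct nvkm_subdev *subdev)
    {
            int ret;

            ret = nvkm_falcon_get(gr->fecs, subdev);
            if (ret)
                    return ret;

            ret = nvkm_falcon_get(gr->gpccs, subdev);
            if (ret) {
                    nvkm_falcon_put(gr->fecs, subdev); /* unwind first get */
                    return ret;
            }
            return 0;
    }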
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h index 268b8d60ff73..db6ee3b06841 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h | |||
| @@ -29,6 +29,7 @@ | |||
| 29 | #include <core/gpuobj.h> | 29 | #include <core/gpuobj.h> |
| 30 | #include <subdev/ltc.h> | 30 | #include <subdev/ltc.h> |
| 31 | #include <subdev/mmu.h> | 31 | #include <subdev/mmu.h> |
| 32 | #include <engine/falcon.h> | ||
| 32 | 33 | ||
| 33 | #define GPC_MAX 32 | 34 | #define GPC_MAX 32 |
| 34 | #define TPC_MAX_PER_GPC 8 | 35 | #define TPC_MAX_PER_GPC 8 |
| @@ -75,6 +76,8 @@ struct gf100_gr { | |||
| 75 | const struct gf100_gr_func *func; | 76 | const struct gf100_gr_func *func; |
| 76 | struct nvkm_gr base; | 77 | struct nvkm_gr base; |
| 77 | 78 | ||
| 79 | struct nvkm_falcon *fecs; | ||
| 80 | struct nvkm_falcon *gpccs; | ||
| 78 | struct gf100_gr_fuc fuc409c; | 81 | struct gf100_gr_fuc fuc409c; |
| 79 | struct gf100_gr_fuc fuc409d; | 82 | struct gf100_gr_fuc fuc409d; |
| 80 | struct gf100_gr_fuc fuc41ac; | 83 | struct gf100_gr_fuc fuc41ac; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c index 2e68919f00b2..c711a55ce392 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c | |||
| @@ -23,6 +23,8 @@ | |||
| 23 | */ | 23 | */ |
| 24 | #include "nv50.h" | 24 | #include "nv50.h" |
| 25 | 25 | ||
| 26 | #include <nvif/class.h> | ||
| 27 | |||
| 26 | static const struct nvkm_gr_func | 28 | static const struct nvkm_gr_func |
| 27 | gt200_gr = { | 29 | gt200_gr = { |
| 28 | .init = nv50_gr_init, | 30 | .init = nv50_gr_init, |
| @@ -31,11 +33,11 @@ gt200_gr = { | |||
| 31 | .tlb_flush = g84_gr_tlb_flush, | 33 | .tlb_flush = g84_gr_tlb_flush, |
| 32 | .units = nv50_gr_units, | 34 | .units = nv50_gr_units, |
| 33 | .sclass = { | 35 | .sclass = { |
| 34 | { -1, -1, 0x0030, &nv50_gr_object }, | 36 | { -1, -1, NV_NULL_CLASS, &nv50_gr_object }, |
| 35 | { -1, -1, 0x502d, &nv50_gr_object }, | 37 | { -1, -1, NV50_TWOD, &nv50_gr_object }, |
| 36 | { -1, -1, 0x5039, &nv50_gr_object }, | 38 | { -1, -1, NV50_MEMORY_TO_MEMORY_FORMAT, &nv50_gr_object }, |
| 37 | { -1, -1, 0x50c0, &nv50_gr_object }, | 39 | { -1, -1, NV50_COMPUTE, &nv50_gr_object }, |
| 38 | { -1, -1, 0x8397, &nv50_gr_object }, | 40 | { -1, -1, GT200_TESLA, &nv50_gr_object }, |
| 39 | {} | 41 | {} |
| 40 | } | 42 | } |
| 41 | }; | 43 | }; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c index 2bf7aac360cc..fa103df32ec7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c | |||
| @@ -23,6 +23,8 @@ | |||
| 23 | */ | 23 | */ |
| 24 | #include "nv50.h" | 24 | #include "nv50.h" |
| 25 | 25 | ||
| 26 | #include <nvif/class.h> | ||
| 27 | |||
| 26 | static const struct nvkm_gr_func | 28 | static const struct nvkm_gr_func |
| 27 | gt215_gr = { | 29 | gt215_gr = { |
| 28 | .init = nv50_gr_init, | 30 | .init = nv50_gr_init, |
| @@ -31,12 +33,12 @@ gt215_gr = { | |||
| 31 | .tlb_flush = g84_gr_tlb_flush, | 33 | .tlb_flush = g84_gr_tlb_flush, |
| 32 | .units = nv50_gr_units, | 34 | .units = nv50_gr_units, |
| 33 | .sclass = { | 35 | .sclass = { |
| 34 | { -1, -1, 0x0030, &nv50_gr_object }, | 36 | { -1, -1, NV_NULL_CLASS, &nv50_gr_object }, |
| 35 | { -1, -1, 0x502d, &nv50_gr_object }, | 37 | { -1, -1, NV50_TWOD, &nv50_gr_object }, |
| 36 | { -1, -1, 0x5039, &nv50_gr_object }, | 38 | { -1, -1, NV50_MEMORY_TO_MEMORY_FORMAT, &nv50_gr_object }, |
| 37 | { -1, -1, 0x50c0, &nv50_gr_object }, | 39 | { -1, -1, NV50_COMPUTE, &nv50_gr_object }, |
| 38 | { -1, -1, 0x8597, &nv50_gr_object }, | 40 | { -1, -1, GT214_TESLA, &nv50_gr_object }, |
| 39 | { -1, -1, 0x85c0, &nv50_gr_object }, | 41 | { -1, -1, GT214_COMPUTE, &nv50_gr_object }, |
| 40 | {} | 42 | {} |
| 41 | } | 43 | } |
| 42 | }; | 44 | }; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c index 95d5219faf93..eb1a90644752 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c | |||
| @@ -23,6 +23,8 @@ | |||
| 23 | */ | 23 | */ |
| 24 | #include "nv50.h" | 24 | #include "nv50.h" |
| 25 | 25 | ||
| 26 | #include <nvif/class.h> | ||
| 27 | |||
| 26 | static const struct nvkm_gr_func | 28 | static const struct nvkm_gr_func |
| 27 | mcp79_gr = { | 29 | mcp79_gr = { |
| 28 | .init = nv50_gr_init, | 30 | .init = nv50_gr_init, |
| @@ -30,11 +32,11 @@ mcp79_gr = { | |||
| 30 | .chan_new = nv50_gr_chan_new, | 32 | .chan_new = nv50_gr_chan_new, |
| 31 | .units = nv50_gr_units, | 33 | .units = nv50_gr_units, |
| 32 | .sclass = { | 34 | .sclass = { |
| 33 | { -1, -1, 0x0030, &nv50_gr_object }, | 35 | { -1, -1, NV_NULL_CLASS, &nv50_gr_object }, |
| 34 | { -1, -1, 0x502d, &nv50_gr_object }, | 36 | { -1, -1, NV50_TWOD, &nv50_gr_object }, |
| 35 | { -1, -1, 0x5039, &nv50_gr_object }, | 37 | { -1, -1, NV50_MEMORY_TO_MEMORY_FORMAT, &nv50_gr_object }, |
| 36 | { -1, -1, 0x50c0, &nv50_gr_object }, | 38 | { -1, -1, NV50_COMPUTE, &nv50_gr_object }, |
| 37 | { -1, -1, 0x8397, &nv50_gr_object }, | 39 | { -1, -1, GT200_TESLA, &nv50_gr_object }, |
| 38 | {} | 40 | {} |
| 39 | } | 41 | } |
| 40 | }; | 42 | }; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c index 027b58e5976b..c91eb56e9327 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c | |||
| @@ -23,6 +23,8 @@ | |||
| 23 | */ | 23 | */ |
| 24 | #include "nv50.h" | 24 | #include "nv50.h" |
| 25 | 25 | ||
| 26 | #include <nvif/class.h> | ||
| 27 | |||
| 26 | static const struct nvkm_gr_func | 28 | static const struct nvkm_gr_func |
| 27 | mcp89_gr = { | 29 | mcp89_gr = { |
| 28 | .init = nv50_gr_init, | 30 | .init = nv50_gr_init, |
| @@ -31,12 +33,12 @@ mcp89_gr = { | |||
| 31 | .tlb_flush = g84_gr_tlb_flush, | 33 | .tlb_flush = g84_gr_tlb_flush, |
| 32 | .units = nv50_gr_units, | 34 | .units = nv50_gr_units, |
| 33 | .sclass = { | 35 | .sclass = { |
| 34 | { -1, -1, 0x0030, &nv50_gr_object }, | 36 | { -1, -1, NV_NULL_CLASS, &nv50_gr_object }, |
| 35 | { -1, -1, 0x502d, &nv50_gr_object }, | 37 | { -1, -1, NV50_TWOD, &nv50_gr_object }, |
| 36 | { -1, -1, 0x5039, &nv50_gr_object }, | 38 | { -1, -1, NV50_MEMORY_TO_MEMORY_FORMAT, &nv50_gr_object }, |
| 37 | { -1, -1, 0x50c0, &nv50_gr_object }, | 39 | { -1, -1, NV50_COMPUTE, &nv50_gr_object }, |
| 38 | { -1, -1, 0x85c0, &nv50_gr_object }, | 40 | { -1, -1, GT214_COMPUTE, &nv50_gr_object }, |
| 39 | { -1, -1, 0x8697, &nv50_gr_object }, | 41 | { -1, -1, GT21A_TESLA, &nv50_gr_object }, |
| 40 | {} | 42 | {} |
| 41 | } | 43 | } |
| 42 | }; | 44 | }; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c index fca67de43f2b..df16ffda1749 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c | |||
| @@ -27,6 +27,8 @@ | |||
| 27 | #include <core/gpuobj.h> | 27 | #include <core/gpuobj.h> |
| 28 | #include <engine/fifo.h> | 28 | #include <engine/fifo.h> |
| 29 | 29 | ||
| 30 | #include <nvif/class.h> | ||
| 31 | |||
| 30 | u64 | 32 | u64 |
| 31 | nv50_gr_units(struct nvkm_gr *gr) | 33 | nv50_gr_units(struct nvkm_gr *gr) |
| 32 | { | 34 | { |
| @@ -778,11 +780,11 @@ nv50_gr = { | |||
| 778 | .chan_new = nv50_gr_chan_new, | 780 | .chan_new = nv50_gr_chan_new, |
| 779 | .units = nv50_gr_units, | 781 | .units = nv50_gr_units, |
| 780 | .sclass = { | 782 | .sclass = { |
| 781 | { -1, -1, 0x0030, &nv50_gr_object }, | 783 | { -1, -1, NV_NULL_CLASS, &nv50_gr_object }, |
| 782 | { -1, -1, 0x502d, &nv50_gr_object }, | 784 | { -1, -1, NV50_TWOD, &nv50_gr_object }, |
| 783 | { -1, -1, 0x5039, &nv50_gr_object }, | 785 | { -1, -1, NV50_MEMORY_TO_MEMORY_FORMAT, &nv50_gr_object }, |
| 784 | { -1, -1, 0x5097, &nv50_gr_object }, | 786 | { -1, -1, NV50_TESLA, &nv50_gr_object }, |
| 785 | { -1, -1, 0x50c0, &nv50_gr_object }, | 787 | { -1, -1, NV50_COMPUTE, &nv50_gr_object }, |
| 786 | {} | 788 | {} |
| 787 | } | 789 | } |
| 788 | }; | 790 | }; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h index d8adcdf6985a..2a52d9f026ec 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h | |||
| @@ -15,6 +15,7 @@ struct nvkm_gr_func { | |||
| 15 | void *(*dtor)(struct nvkm_gr *); | 15 | void *(*dtor)(struct nvkm_gr *); |
| 16 | int (*oneinit)(struct nvkm_gr *); | 16 | int (*oneinit)(struct nvkm_gr *); |
| 17 | int (*init)(struct nvkm_gr *); | 17 | int (*init)(struct nvkm_gr *); |
| 18 | int (*fini)(struct nvkm_gr *, bool); | ||
| 18 | void (*intr)(struct nvkm_gr *); | 19 | void (*intr)(struct nvkm_gr *); |
| 19 | void (*tile)(struct nvkm_gr *, int region, struct nvkm_fb_tile *); | 20 | void (*tile)(struct nvkm_gr *, int region, struct nvkm_fb_tile *); |
| 20 | int (*tlb_flush)(struct nvkm_gr *); | 21 | int (*tlb_flush)(struct nvkm_gr *); |
| @@ -24,6 +25,7 @@ struct nvkm_gr_func { | |||
| 24 | /* Returns chipset-specific counts of units packed into an u64. | 25 | /* Returns chipset-specific counts of units packed into an u64. |
| 25 | */ | 26 | */ |
| 26 | u64 (*units)(struct nvkm_gr *); | 27 | u64 (*units)(struct nvkm_gr *); |
| 28 | bool (*chsw_load)(struct nvkm_gr *); | ||
| 27 | struct nvkm_sclass sclass[]; | 29 | struct nvkm_sclass sclass[]; |
| 28 | }; | 30 | }; |
| 29 | 31 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild new file mode 100644 index 000000000000..584863db9bfc --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild | |||
| @@ -0,0 +1,2 @@ | |||
| 1 | nvkm-y += nvkm/falcon/base.o | ||
| 2 | nvkm-y += nvkm/falcon/v1.o | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c new file mode 100644 index 000000000000..4852f313762f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c | |||
| @@ -0,0 +1,191 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 20 | * DEALINGS IN THE SOFTWARE. | ||
| 21 | */ | ||
| 22 | #include "priv.h" | ||
| 23 | |||
| 24 | #include <subdev/mc.h> | ||
| 25 | |||
| 26 | void | ||
| 27 | nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start, | ||
| 28 | u32 size, u16 tag, u8 port, bool secure) | ||
| 29 | { | ||
| 30 | if (secure && !falcon->secret) { | ||
| 31 | nvkm_warn(falcon->user, | ||
| 32 | "writing with secure tag on a non-secure falcon!\n"); | ||
| 33 | return; | ||
| 34 | } | ||
| 35 | |||
| 36 | falcon->func->load_imem(falcon, data, start, size, tag, port, | ||
| 37 | secure); | ||
| 38 | } | ||
| 39 | |||
| 40 | void | ||
| 41 | nvkm_falcon_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start, | ||
| 42 | u32 size, u8 port) | ||
| 43 | { | ||
| 44 | falcon->func->load_dmem(falcon, data, start, size, port); | ||
| 45 | } | ||
| 46 | |||
| 47 | void | ||
| 48 | nvkm_falcon_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port, | ||
| 49 | void *data) | ||
| 50 | { | ||
| 51 | falcon->func->read_dmem(falcon, start, size, port, data); | ||
| 52 | } | ||
| 53 | |||
| 54 | void | ||
| 55 | nvkm_falcon_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *inst) | ||
| 56 | { | ||
| 57 | if (!falcon->func->bind_context) { | ||
| 58 | nvkm_error(falcon->user, | ||
| 59 | "Context binding not supported on this falcon!\n"); | ||
| 60 | return; | ||
| 61 | } | ||
| 62 | |||
| 63 | falcon->func->bind_context(falcon, inst); | ||
| 64 | } | ||
| 65 | |||
| 66 | void | ||
| 67 | nvkm_falcon_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr) | ||
| 68 | { | ||
| 69 | falcon->func->set_start_addr(falcon, start_addr); | ||
| 70 | } | ||
| 71 | |||
| 72 | void | ||
| 73 | nvkm_falcon_start(struct nvkm_falcon *falcon) | ||
| 74 | { | ||
| 75 | falcon->func->start(falcon); | ||
| 76 | } | ||
| 77 | |||
| 78 | int | ||
| 79 | nvkm_falcon_enable(struct nvkm_falcon *falcon) | ||
| 80 | { | ||
| 81 | struct nvkm_device *device = falcon->owner->device; | ||
| 82 | enum nvkm_devidx id = falcon->owner->index; | ||
| 83 | int ret; | ||
| 84 | |||
| 85 | nvkm_mc_enable(device, id); | ||
| 86 | ret = falcon->func->enable(falcon); | ||
| 87 | if (ret) { | ||
| 88 | nvkm_mc_disable(device, id); | ||
| 89 | return ret; | ||
| 90 | } | ||
| 91 | |||
| 92 | return 0; | ||
| 93 | } | ||
| 94 | |||
| 95 | void | ||
| 96 | nvkm_falcon_disable(struct nvkm_falcon *falcon) | ||
| 97 | { | ||
| 98 | struct nvkm_device *device = falcon->owner->device; | ||
| 99 | enum nvkm_devidx id = falcon->owner->index; | ||
| 100 | |||
| 101 | /* already disabled; bail out, as wait_idle would otherwise time out */ | ||
| 102 | if (!nvkm_mc_enabled(device, id)) | ||
| 103 | return; | ||
| 104 | |||
| 105 | falcon->func->disable(falcon); | ||
| 106 | |||
| 107 | nvkm_mc_disable(device, id); | ||
| 108 | } | ||
| 109 | |||
| 110 | int | ||
| 111 | nvkm_falcon_reset(struct nvkm_falcon *falcon) | ||
| 112 | { | ||
| 113 | nvkm_falcon_disable(falcon); | ||
| 114 | return nvkm_falcon_enable(falcon); | ||
| 115 | } | ||
| 116 | |||
| 117 | int | ||
| 118 | nvkm_falcon_wait_for_halt(struct nvkm_falcon *falcon, u32 ms) | ||
| 119 | { | ||
| 120 | return falcon->func->wait_for_halt(falcon, ms); | ||
| 121 | } | ||
| 122 | |||
| 123 | int | ||
| 124 | nvkm_falcon_clear_interrupt(struct nvkm_falcon *falcon, u32 mask) | ||
| 125 | { | ||
| 126 | return falcon->func->clear_interrupt(falcon, mask); | ||
| 127 | } | ||
| 128 | |||
| 129 | void | ||
| 130 | nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user) | ||
| 131 | { | ||
| 132 | mutex_lock(&falcon->mutex); | ||
| 133 | if (falcon->user == user) { | ||
| 134 | nvkm_debug(falcon->user, "released %s falcon\n", falcon->name); | ||
| 135 | falcon->user = NULL; | ||
| 136 | } | ||
| 137 | mutex_unlock(&falcon->mutex); | ||
| 138 | } | ||
| 139 | |||
| 140 | int | ||
| 141 | nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user) | ||
| 142 | { | ||
| 143 | mutex_lock(&falcon->mutex); | ||
| 144 | if (falcon->user) { | ||
| 145 | nvkm_error(user, "%s falcon already acquired by %s!\n", | ||
| 146 | falcon->name, nvkm_subdev_name[falcon->user->index]); | ||
| 147 | mutex_unlock(&falcon->mutex); | ||
| 148 | return -EBUSY; | ||
| 149 | } | ||
| 150 | |||
| 151 | nvkm_debug(user, "acquired %s falcon\n", falcon->name); | ||
| 152 | falcon->user = user; | ||
| 153 | mutex_unlock(&falcon->mutex); | ||
| 154 | return 0; | ||
| 155 | } | ||
| 156 | |||
| 157 | void | ||
| 158 | nvkm_falcon_ctor(const struct nvkm_falcon_func *func, | ||
| 159 | struct nvkm_subdev *subdev, const char *name, u32 addr, | ||
| 160 | struct nvkm_falcon *falcon) | ||
| 161 | { | ||
| 162 | u32 reg; | ||
| 163 | |||
| 164 | falcon->func = func; | ||
| 165 | falcon->owner = subdev; | ||
| 166 | falcon->name = name; | ||
| 167 | falcon->addr = addr; | ||
| 168 | mutex_init(&falcon->mutex); | ||
| 169 | |||
| 170 | reg = nvkm_falcon_rd32(falcon, 0x12c); | ||
| 171 | falcon->version = reg & 0xf; | ||
| 172 | falcon->secret = (reg >> 4) & 0x3; | ||
| 173 | falcon->code.ports = (reg >> 8) & 0xf; | ||
| 174 | falcon->data.ports = (reg >> 12) & 0xf; | ||
| 175 | |||
| 176 | reg = nvkm_falcon_rd32(falcon, 0x108); | ||
| 177 | falcon->code.limit = (reg & 0x1ff) << 8; | ||
| 178 | falcon->data.limit = (reg & 0x3fe00) >> 1; | ||
| 179 | |||
| 180 | reg = nvkm_falcon_rd32(falcon, 0xc08); | ||
| 181 | falcon->debug = (reg >> 20) & 0x1; | ||
| 182 | } | ||
| 183 | |||
| 184 | void | ||
| 185 | nvkm_falcon_del(struct nvkm_falcon **pfalcon) | ||
| 186 | { | ||
| 187 | if (*pfalcon) { | ||
| 188 | kfree(*pfalcon); | ||
| 189 | *pfalcon = NULL; | ||
| 190 | } | ||
| 191 | } | ||
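The new falcon library above centres on an ownership protocol: nvkm_falcon_get()/nvkm_falcon_put() serialise access behind falcon->mutex so only one subdev programs a given falcon at a time, while every other entry point dispatches through falcon->func. A minimal sketch of the intended call sequence, assuming hypothetical `subdev`, `falcon`, `img` and `len` variables (none of them from this patch):

    /* take exclusive ownership; fails with -EBUSY if another subdev holds it */
    int ret = nvkm_falcon_get(falcon, subdev);
    if (ret)
            return ret;

    nvkm_falcon_reset(falcon);                               /* MC disable + enable */
    nvkm_falcon_load_imem(falcon, img, 0, len, 0, 0, false); /* non-secure blob */
    nvkm_falcon_set_start_addr(falcon, 0x0);
    nvkm_falcon_start(falcon);

    if (nvkm_falcon_wait_for_halt(falcon, 100))              /* up to 100ms */
            nvkm_error(subdev, "falcon did not halt\n");

    nvkm_falcon_put(falcon, subdev);                         /* release ownership */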
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h new file mode 100644 index 000000000000..97b56f759d0b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h | |||
| @@ -0,0 +1,8 @@ | |||
| 1 | #ifndef __NVKM_FALCON_PRIV_H__ | ||
| 2 | #define __NVKM_FALCON_PRIV_H__ | ||
| 3 | #include <engine/falcon.h> | ||
| 4 | |||
| 5 | void | ||
| 6 | nvkm_falcon_ctor(const struct nvkm_falcon_func *, struct nvkm_subdev *, | ||
| 7 | const char *, u32, struct nvkm_falcon *); | ||
| 8 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c new file mode 100644 index 000000000000..b537f111f39c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c | |||
| @@ -0,0 +1,266 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 20 | * DEALINGS IN THE SOFTWARE. | ||
| 21 | */ | ||
| 22 | #include "priv.h" | ||
| 23 | |||
| 24 | #include <core/gpuobj.h> | ||
| 25 | #include <core/memory.h> | ||
| 26 | #include <subdev/timer.h> | ||
| 27 | |||
| 28 | static void | ||
| 29 | nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start, | ||
| 30 | u32 size, u16 tag, u8 port, bool secure) | ||
| 31 | { | ||
| 32 | u8 rem = size % 4; | ||
| 33 | u32 reg; | ||
| 34 | int i; | ||
| 35 | |||
| 36 | size -= rem; | ||
| 37 | |||
| 38 | reg = start | BIT(24) | (secure ? BIT(28) : 0); | ||
| 39 | nvkm_falcon_wr32(falcon, 0x180 + (port * 16), reg); | ||
| 40 | for (i = 0; i < size / 4; i++) { | ||
| 41 | /* write new tag every 256B */ | ||
| 42 | if ((i & 0x3f) == 0) | ||
| 43 | nvkm_falcon_wr32(falcon, 0x188, tag++); | ||
| 44 | nvkm_falcon_wr32(falcon, 0x184, ((u32 *)data)[i]); | ||
| 45 | } | ||
| 46 | |||
| 47 | /* | ||
| 48 | * If size is not a multiple of 4, mask the last word to ensure garbage | ||
| 49 | * does not get written | ||
| 50 | */ | ||
| 51 | if (rem) { | ||
| 52 | u32 extra = ((u32 *)data)[i]; | ||
| 53 | |||
| 54 | /* write new tag every 256B */ | ||
| 55 | if ((i & 0x3f) == 0) | ||
| 56 | nvkm_falcon_wr32(falcon, 0x188, tag++); | ||
| 57 | nvkm_falcon_wr32(falcon, 0x184, extra & (BIT(rem * 8) - 1)); | ||
| 58 | ++i; | ||
| 59 | } | ||
| 60 | |||
| 61 | /* code must be padded to 0x40 words */ | ||
| 62 | for (; i & 0x3f; i++) | ||
| 63 | nvkm_falcon_wr32(falcon, 0x184, 0); | ||
| 64 | } | ||
| 65 | |||
| 66 | static void | ||
| 67 | nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start, | ||
| 68 | u32 size, u8 port) | ||
| 69 | { | ||
| 70 | u8 rem = size % 4; | ||
| 71 | int i; | ||
| 72 | |||
| 73 | size -= rem; | ||
| 74 | |||
| 75 | nvkm_falcon_wr32(falcon, 0x1c0 + (port * 16), start | (0x1 << 24)); | ||
| 76 | for (i = 0; i < size / 4; i++) | ||
| 77 | nvkm_falcon_wr32(falcon, 0x1c4, ((u32 *)data)[i]); | ||
| 78 | |||
| 79 | /* | ||
| 80 | * If size is not a multiple of 4, mask the last word to ensure garbage | ||
| 81 | * does not get written | ||
| 82 | */ | ||
| 83 | if (rem) { | ||
| 84 | u32 extra = ((u32 *)data)[i]; | ||
| 85 | |||
| 86 | nvkm_falcon_wr32(falcon, 0x1c4, extra & (BIT(rem * 8) - 1)); | ||
| 87 | } | ||
| 88 | } | ||
| 89 | |||
| 90 | static void | ||
| 91 | nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, | ||
| 92 | u8 port, void *data) | ||
| 93 | { | ||
| 94 | u8 rem = size % 4; | ||
| 95 | int i; | ||
| 96 | |||
| 97 | size -= rem; | ||
| 98 | |||
| 99 | nvkm_falcon_wr32(falcon, 0x1c0 + (port * 16), start | (0x1 << 25)); | ||
| 100 | for (i = 0; i < size / 4; i++) | ||
| 101 | ((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0x1c4); | ||
| 102 | |||
| 103 | /* | ||
| 104 | * If size is not a multiple of 4, mask the last word to ensure garbage | ||
| 105 | * does not get read | ||
| 106 | */ | ||
| 107 | if (rem) { | ||
| 108 | u32 extra = nvkm_falcon_rd32(falcon, 0x1c4); | ||
| 109 | |||
| 110 | for (i = size; i < size + rem; i++) { | ||
| 111 | ((u8 *)data)[i] = (u8)(extra & 0xff); | ||
| 112 | extra >>= 8; | ||
| 113 | } | ||
| 114 | } | ||
| 115 | } | ||
| 116 | |||
| 117 | static void | ||
| 118 | nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx) | ||
| 119 | { | ||
| 120 | u32 inst_loc; | ||
| 121 | |||
| 122 | /* disable instance block binding */ | ||
| 123 | if (ctx == NULL) { | ||
| 124 | nvkm_falcon_wr32(falcon, 0x10c, 0x0); | ||
| 125 | return; | ||
| 126 | } | ||
| 127 | |||
| 128 | nvkm_falcon_wr32(falcon, 0x10c, 0x1); | ||
| 129 | |||
| 130 | /* setup apertures - virtual */ | ||
| 131 | nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_UCODE, 0x4); | ||
| 132 | nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_VIRT, 0x0); | ||
| 133 | /* setup apertures - physical */ | ||
| 134 | nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_PHYS_VID, 0x4); | ||
| 135 | nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_PHYS_SYS_COH, 0x5); | ||
| 136 | nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_PHYS_SYS_NCOH, 0x6); | ||
| 137 | |||
| 138 | /* Set context */ | ||
| 139 | switch (nvkm_memory_target(ctx->memory)) { | ||
| 140 | case NVKM_MEM_TARGET_VRAM: inst_loc = 0; break; | ||
| 141 | case NVKM_MEM_TARGET_NCOH: inst_loc = 3; break; | ||
| 142 | default: | ||
| 143 | WARN_ON(1); | ||
| 144 | return; | ||
| 145 | } | ||
| 146 | |||
| 147 | /* Enable context */ | ||
| 148 | nvkm_falcon_mask(falcon, 0x048, 0x1, 0x1); | ||
| 149 | nvkm_falcon_wr32(falcon, 0x480, | ||
| 150 | ((ctx->addr >> 12) & 0xfffffff) | | ||
| 151 | (inst_loc << 28) | (1 << 30)); | ||
| 152 | } | ||
| 153 | |||
| 154 | static void | ||
| 155 | nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr) | ||
| 156 | { | ||
| 157 | nvkm_falcon_wr32(falcon, 0x104, start_addr); | ||
| 158 | } | ||
| 159 | |||
| 160 | static void | ||
| 161 | nvkm_falcon_v1_start(struct nvkm_falcon *falcon) | ||
| 162 | { | ||
| 163 | u32 reg = nvkm_falcon_rd32(falcon, 0x100); | ||
| 164 | |||
| 165 | if (reg & BIT(6)) | ||
| 166 | nvkm_falcon_wr32(falcon, 0x130, 0x2); | ||
| 167 | else | ||
| 168 | nvkm_falcon_wr32(falcon, 0x100, 0x2); | ||
| 169 | } | ||
| 170 | |||
| 171 | static int | ||
| 172 | nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms) | ||
| 173 | { | ||
| 174 | struct nvkm_device *device = falcon->owner->device; | ||
| 175 | int ret; | ||
| 176 | |||
| 177 | ret = nvkm_wait_msec(device, ms, falcon->addr + 0x100, 0x10, 0x10); | ||
| 178 | if (ret < 0) | ||
| 179 | return ret; | ||
| 180 | |||
| 181 | return 0; | ||
| 182 | } | ||
| 183 | |||
| 184 | static int | ||
| 185 | nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *falcon, u32 mask) | ||
| 186 | { | ||
| 187 | struct nvkm_device *device = falcon->owner->device; | ||
| 188 | int ret; | ||
| 189 | |||
| 190 | /* clear interrupt(s) */ | ||
| 191 | nvkm_falcon_mask(falcon, 0x004, mask, mask); | ||
| 192 | /* wait until interrupts are cleared */ | ||
| 193 | ret = nvkm_wait_msec(device, 10, falcon->addr + 0x008, mask, 0x0); | ||
| 194 | if (ret < 0) | ||
| 195 | return ret; | ||
| 196 | |||
| 197 | return 0; | ||
| 198 | } | ||
| 199 | |||
| 200 | static int | ||
| 201 | falcon_v1_wait_idle(struct nvkm_falcon *falcon) | ||
| 202 | { | ||
| 203 | struct nvkm_device *device = falcon->owner->device; | ||
| 204 | int ret; | ||
| 205 | |||
| 206 | ret = nvkm_wait_msec(device, 10, falcon->addr + 0x04c, 0xffff, 0x0); | ||
| 207 | if (ret < 0) | ||
| 208 | return ret; | ||
| 209 | |||
| 210 | return 0; | ||
| 211 | } | ||
| 212 | |||
| 213 | static int | ||
| 214 | nvkm_falcon_v1_enable(struct nvkm_falcon *falcon) | ||
| 215 | { | ||
| 216 | struct nvkm_device *device = falcon->owner->device; | ||
| 217 | int ret; | ||
| 218 | |||
| 219 | ret = nvkm_wait_msec(device, 10, falcon->addr + 0x10c, 0x6, 0x0); | ||
| 220 | if (ret < 0) { | ||
| 221 | nvkm_error(falcon->user, "Falcon mem scrubbing timeout\n"); | ||
| 222 | return ret; | ||
| 223 | } | ||
| 224 | |||
| 225 | ret = falcon_v1_wait_idle(falcon); | ||
| 226 | if (ret) | ||
| 227 | return ret; | ||
| 228 | |||
| 229 | /* enable IRQs */ | ||
| 230 | nvkm_falcon_wr32(falcon, 0x010, 0xff); | ||
| 231 | |||
| 232 | return 0; | ||
| 233 | } | ||
| 234 | |||
| 235 | static void | ||
| 236 | nvkm_falcon_v1_disable(struct nvkm_falcon *falcon) | ||
| 237 | { | ||
| 238 | /* disable IRQs and wait for any previous code to complete */ | ||
| 239 | nvkm_falcon_wr32(falcon, 0x014, 0xff); | ||
| 240 | falcon_v1_wait_idle(falcon); | ||
| 241 | } | ||
| 242 | |||
| 243 | static const struct nvkm_falcon_func | ||
| 244 | nvkm_falcon_v1 = { | ||
| 245 | .load_imem = nvkm_falcon_v1_load_imem, | ||
| 246 | .load_dmem = nvkm_falcon_v1_load_dmem, | ||
| 247 | .read_dmem = nvkm_falcon_v1_read_dmem, | ||
| 248 | .bind_context = nvkm_falcon_v1_bind_context, | ||
| 249 | .start = nvkm_falcon_v1_start, | ||
| 250 | .wait_for_halt = nvkm_falcon_v1_wait_for_halt, | ||
| 251 | .clear_interrupt = nvkm_falcon_v1_clear_interrupt, | ||
| 252 | .enable = nvkm_falcon_v1_enable, | ||
| 253 | .disable = nvkm_falcon_v1_disable, | ||
| 254 | .set_start_addr = nvkm_falcon_v1_set_start_addr, | ||
| 255 | }; | ||
| 256 | |||
| 257 | int | ||
| 258 | nvkm_falcon_v1_new(struct nvkm_subdev *owner, const char *name, u32 addr, | ||
| 259 | struct nvkm_falcon **pfalcon) | ||
| 260 | { | ||
| 261 | struct nvkm_falcon *falcon; | ||
| 262 | if (!(falcon = *pfalcon = kzalloc(sizeof(*falcon), GFP_KERNEL))) | ||
| 263 | return -ENOMEM; | ||
| 264 | nvkm_falcon_ctor(&nvkm_falcon_v1, owner, name, addr, falcon); | ||
| 265 | return 0; | ||
| 266 | } | ||
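In nvkm_falcon_v1_load_imem() above, the `(i & 0x3f) == 0` test writes a fresh tag every 0x40 32-bit words because v1 falcon IMEM is tagged in 256-byte pages, and the trailing `for (; i & 0x3f; i++)` zero-pad rounds the upload out to a page boundary so the last tag covers a complete page. The page arithmetic, distilled into a hypothetical helper that is not part of the patch:

    /* 256-byte IMEM pages (= tags) consumed by a code blob of `size` bytes,
     * matching the loop bounds in nvkm_falcon_v1_load_imem() */
    static u32 falcon_v1_imem_pages(u32 size)
    {
            return DIV_ROUND_UP(size, 0x100);
    }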
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild index be57220a2e01..6b4f1e06a38f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild | |||
| @@ -19,6 +19,7 @@ nvkm-y += nvkm/subdev/bios/pcir.o | |||
| 19 | nvkm-y += nvkm/subdev/bios/perf.o | 19 | nvkm-y += nvkm/subdev/bios/perf.o |
| 20 | nvkm-y += nvkm/subdev/bios/pll.o | 20 | nvkm-y += nvkm/subdev/bios/pll.o |
| 21 | nvkm-y += nvkm/subdev/bios/pmu.o | 21 | nvkm-y += nvkm/subdev/bios/pmu.o |
| 22 | nvkm-y += nvkm/subdev/bios/power_budget.o | ||
| 22 | nvkm-y += nvkm/subdev/bios/ramcfg.o | 23 | nvkm-y += nvkm/subdev/bios/ramcfg.o |
| 23 | nvkm-y += nvkm/subdev/bios/rammap.o | 24 | nvkm-y += nvkm/subdev/bios/rammap.o |
| 24 | nvkm-y += nvkm/subdev/bios/shadow.o | 25 | nvkm-y += nvkm/subdev/bios/shadow.o |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/power_budget.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/power_budget.c new file mode 100644 index 000000000000..617bfffce4ad --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/power_budget.c | |||
| @@ -0,0 +1,126 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2016 Karol Herbst | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Karol Herbst | ||
| 23 | */ | ||
| 24 | #include <subdev/bios.h> | ||
| 25 | #include <subdev/bios/bit.h> | ||
| 26 | #include <subdev/bios/power_budget.h> | ||
| 27 | |||
| 28 | static u32 | ||
| 29 | nvbios_power_budget_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, | ||
| 30 | u8 *len) | ||
| 31 | { | ||
| 32 | struct bit_entry bit_P; | ||
| 33 | u32 power_budget; | ||
| 34 | |||
| 35 | if (bit_entry(bios, 'P', &bit_P) || bit_P.version != 2 || | ||
| 36 | bit_P.length < 0x2c) | ||
| 37 | return 0; | ||
| 38 | |||
| 39 | power_budget = nvbios_rd32(bios, bit_P.offset + 0x2c); | ||
| 40 | if (!power_budget) | ||
| 41 | return 0; | ||
| 42 | |||
| 43 | *ver = nvbios_rd08(bios, power_budget); | ||
| 44 | switch (*ver) { | ||
| 45 | case 0x20: | ||
| 46 | case 0x30: | ||
| 47 | *hdr = nvbios_rd08(bios, power_budget + 0x1); | ||
| 48 | *len = nvbios_rd08(bios, power_budget + 0x2); | ||
| 49 | *cnt = nvbios_rd08(bios, power_budget + 0x3); | ||
| 50 | return power_budget; | ||
| 51 | default: | ||
| 52 | break; | ||
| 53 | } | ||
| 54 | |||
| 55 | return 0; | ||
| 56 | } | ||
| 57 | |||
| 58 | int | ||
| 59 | nvbios_power_budget_header(struct nvkm_bios *bios, | ||
| 60 | struct nvbios_power_budget *budget) | ||
| 61 | { | ||
| 62 | struct nvkm_subdev *subdev = &bios->subdev; | ||
| 63 | u8 ver, hdr, cnt, len, cap_entry; | ||
| 64 | u32 header; | ||
| 65 | |||
| 66 | if (!bios || !budget) | ||
| 67 | return -EINVAL; | ||
| 68 | |||
| 69 | header = nvbios_power_budget_table(bios, &ver, &hdr, &cnt, &len); | ||
| 70 | if (!header || !cnt) | ||
| 71 | return -ENODEV; | ||
| 72 | |||
| 73 | switch (ver) { | ||
| 74 | case 0x20: | ||
| 75 | cap_entry = nvbios_rd08(bios, header + 0x9); | ||
| 76 | break; | ||
| 77 | case 0x30: | ||
| 78 | cap_entry = nvbios_rd08(bios, header + 0xa); | ||
| 79 | break; | ||
| 80 | default: | ||
| 81 | cap_entry = 0xff; | ||
| 82 | } | ||
| 83 | |||
| 84 | if (cap_entry >= cnt && cap_entry != 0xff) { | ||
| 85 | nvkm_warn(subdev, | ||
| 86 | "invalid cap_entry in power budget table found\n"); | ||
| 87 | budget->cap_entry = 0xff; | ||
| 88 | return -EINVAL; | ||
| 89 | } | ||
| 90 | |||
| 91 | budget->offset = header; | ||
| 92 | budget->ver = ver; | ||
| 93 | budget->hlen = hdr; | ||
| 94 | budget->elen = len; | ||
| 95 | budget->ecount = cnt; | ||
| 96 | |||
| 97 | budget->cap_entry = cap_entry; | ||
| 98 | |||
| 99 | return 0; | ||
| 100 | } | ||
| 101 | |||
| 102 | int | ||
| 103 | nvbios_power_budget_entry(struct nvkm_bios *bios, | ||
| 104 | struct nvbios_power_budget *budget, | ||
| 105 | u8 idx, struct nvbios_power_budget_entry *entry) | ||
| 106 | { | ||
| 107 | u32 entry_offset; | ||
| 108 | |||
| 109 | if (!bios || !budget || !budget->offset || idx >= budget->ecount | ||
| 110 | || !entry) | ||
| 111 | return -EINVAL; | ||
| 112 | |||
| 113 | entry_offset = budget->offset + budget->hlen + idx * budget->elen; | ||
| 114 | |||
| 115 | if (budget->ver >= 0x20) { | ||
| 116 | entry->min_w = nvbios_rd32(bios, entry_offset + 0x2); | ||
| 117 | entry->avg_w = nvbios_rd32(bios, entry_offset + 0x6); | ||
| 118 | entry->max_w = nvbios_rd32(bios, entry_offset + 0xa); | ||
| 119 | } else { | ||
| 120 | entry->min_w = 0; | ||
| 121 | entry->max_w = nvbios_rd32(bios, entry_offset + 0x2); | ||
| 122 | entry->avg_w = entry->max_w; | ||
| 123 | } | ||
| 124 | |||
| 125 | return 0; | ||
| 126 | } | ||
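A consumer first parses the table header, then reads the entry the header flags as the board's power cap; the iccsense hunk further down does exactly this. A minimal sketch, assuming valid `bios` and `subdev` pointers:

    struct nvbios_power_budget budget;
    struct nvbios_power_budget_entry entry;

    if (!nvbios_power_budget_header(bios, &budget) && budget.cap_entry != 0xff &&
        !nvbios_power_budget_entry(bios, &budget, budget.cap_entry, &entry))
            nvkm_info(subdev, "power budget: min %u, avg %u, max %u\n",
                      entry.min_w, entry.avg_w, entry.max_w);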
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c index 5841f297973c..da1770e47490 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c | |||
| @@ -112,7 +112,7 @@ read_pll_src(struct nv50_clk *clk, u32 base) | |||
| 112 | M = (coef & 0x000000ff) >> 0; | 112 | M = (coef & 0x000000ff) >> 0; |
| 113 | break; | 113 | break; |
| 114 | default: | 114 | default: |
| 115 | BUG_ON(1); | 115 | BUG(); |
| 116 | } | 116 | } |
| 117 | 117 | ||
| 118 | if (M) | 118 | if (M) |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c index c714b097719c..59362f8dee22 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c | |||
| @@ -50,7 +50,7 @@ nv50_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq) | |||
| 50 | ret = nv04_pll_calc(subdev, &info, freq, &N1, &M1, &N2, &M2, &P); | 50 | ret = nv04_pll_calc(subdev, &info, freq, &N1, &M1, &N2, &M2, &P); |
| 51 | if (!ret) { | 51 | if (!ret) { |
| 52 | nvkm_error(subdev, "failed pll calculation\n"); | 52 | nvkm_error(subdev, "failed pll calculation\n"); |
| 53 | return ret; | 53 | return -EINVAL; |
| 54 | } | 54 | } |
| 55 | 55 | ||
| 56 | switch (info.type) { | 56 | switch (info.type) { |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c index 093223d1df4f..6758da93a3a1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c | |||
| @@ -445,7 +445,7 @@ gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin, | |||
| 445 | { | 445 | { |
| 446 | struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc; | 446 | struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc; |
| 447 | struct nvkm_mm *mm = &ram->vram; | 447 | struct nvkm_mm *mm = &ram->vram; |
| 448 | struct nvkm_mm_node *r; | 448 | struct nvkm_mm_node **node, *r; |
| 449 | struct nvkm_mem *mem; | 449 | struct nvkm_mem *mem; |
| 450 | int type = (memtype & 0x0ff); | 450 | int type = (memtype & 0x0ff); |
| 451 | int back = (memtype & 0x800); | 451 | int back = (memtype & 0x800); |
| @@ -462,7 +462,6 @@ gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin, | |||
| 462 | if (!mem) | 462 | if (!mem) |
| 463 | return -ENOMEM; | 463 | return -ENOMEM; |
| 464 | 464 | ||
| 465 | INIT_LIST_HEAD(&mem->regions); | ||
| 466 | mem->size = size; | 465 | mem->size = size; |
| 467 | 466 | ||
| 468 | mutex_lock(&ram->fb->subdev.mutex); | 467 | mutex_lock(&ram->fb->subdev.mutex); |
| @@ -478,6 +477,7 @@ gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin, | |||
| 478 | } | 477 | } |
| 479 | mem->memtype = type; | 478 | mem->memtype = type; |
| 480 | 479 | ||
| 480 | node = &mem->mem; | ||
| 481 | do { | 481 | do { |
| 482 | if (back) | 482 | if (back) |
| 483 | ret = nvkm_mm_tail(mm, 0, 1, size, ncmin, align, &r); | 483 | ret = nvkm_mm_tail(mm, 0, 1, size, ncmin, align, &r); |
| @@ -489,13 +489,13 @@ gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin, | |||
| 489 | return ret; | 489 | return ret; |
| 490 | } | 490 | } |
| 491 | 491 | ||
| 492 | list_add_tail(&r->rl_entry, &mem->regions); | 492 | *node = r; |
| 493 | node = &r->next; | ||
| 493 | size -= r->length; | 494 | size -= r->length; |
| 494 | } while (size); | 495 | } while (size); |
| 495 | mutex_unlock(&ram->fb->subdev.mutex); | 496 | mutex_unlock(&ram->fb->subdev.mutex); |
| 496 | 497 | ||
| 497 | r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry); | 498 | mem->offset = (u64)mem->mem->offset << NVKM_RAM_MM_SHIFT; |
| 498 | mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT; | ||
| 499 | *pmem = mem; | 499 | *pmem = mem; |
| 500 | return 0; | 500 | return 0; |
| 501 | } | 501 | } |
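The allocation loop above switches from a list_head to an intrusive singly-linked chain hung off mem->mem, built with the classic tail-pointer idiom: `node` always addresses the link field to fill next (initially `&mem->mem`, afterwards `&r->next`), so regions append in allocation order without walking the chain. The idiom in isolation, using a hypothetical node type and allocator:

    struct node { struct node *next; /* payload */ };

    struct node *head = NULL, **tail = &head;
    while (have_more_regions()) {            /* hypothetical condition */
            struct node *r = alloc_region(); /* hypothetical allocator */
            r->next = NULL;
            *tail = r;                       /* link the new node in */
            tail = &r->next;                 /* advance to its link field */
    }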
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c index 7904fa41acef..fb8a1239743d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c | |||
| @@ -989,7 +989,7 @@ gk104_pll_calc_hiclk(int target_khz, int crystal, | |||
| 989 | int *N1, int *fN1, int *M1, int *P1, | 989 | int *N1, int *fN1, int *M1, int *P1, |
| 990 | int *N2, int *M2, int *P2) | 990 | int *N2, int *M2, int *P2) |
| 991 | { | 991 | { |
| 992 | int best_clk = 0, best_err = target_khz, p_ref, n_ref; | 992 | int best_err = target_khz, p_ref, n_ref; |
| 993 | bool upper = false; | 993 | bool upper = false; |
| 994 | 994 | ||
| 995 | *M1 = 1; | 995 | *M1 = 1; |
| @@ -1010,7 +1010,6 @@ gk104_pll_calc_hiclk(int target_khz, int crystal, | |||
| 1010 | /* we found a better combination */ | 1010 | /* we found a better combination */ |
| 1011 | if (cur_err < best_err) { | 1011 | if (cur_err < best_err) { |
| 1012 | best_err = cur_err; | 1012 | best_err = cur_err; |
| 1013 | best_clk = cur_clk; | ||
| 1014 | *N2 = cur_N; | 1013 | *N2 = cur_N; |
| 1015 | *N1 = n_ref; | 1014 | *N1 = n_ref; |
| 1016 | *P1 = p_ref; | 1015 | *P1 = p_ref; |
| @@ -1022,7 +1021,6 @@ gk104_pll_calc_hiclk(int target_khz, int crystal, | |||
| 1022 | - target_khz; | 1021 | - target_khz; |
| 1023 | if (cur_err < best_err) { | 1022 | if (cur_err < best_err) { |
| 1024 | best_err = cur_err; | 1023 | best_err = cur_err; |
| 1025 | best_clk = cur_clk; | ||
| 1026 | *N2 = cur_N; | 1024 | *N2 = cur_N; |
| 1027 | *N1 = n_ref; | 1025 | *N1 = n_ref; |
| 1028 | *P1 = p_ref; | 1026 | *P1 = p_ref; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c index 0a0e44b75577..017a91de74a0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c | |||
| @@ -39,7 +39,7 @@ mcp77_ram_init(struct nvkm_ram *base) | |||
| 39 | u32 flush = ((ram->base.size - (ram->poller_base + 0x40)) >> 5) - 1; | 39 | u32 flush = ((ram->base.size - (ram->poller_base + 0x40)) >> 5) - 1; |
| 40 | 40 | ||
| 41 | /* Enable NISO poller for various clients and set their associated | 41 | /* Enable NISO poller for various clients and set their associated |
| 42 | * read address, only for MCP77/78 and MCP79/7A. (fd#25701) | 42 | * read address, only for MCP77/78 and MCP79/7A. (fd#27501) |
| 43 | */ | 43 | */ |
| 44 | nvkm_wr32(device, 0x100c18, dniso); | 44 | nvkm_wr32(device, 0x100c18, dniso); |
| 45 | nvkm_mask(device, 0x100c14, 0x00000000, 0x00000001); | 45 | nvkm_mask(device, 0x100c14, 0x00000000, 0x00000001); |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c index 87bde8ff2d6b..6549b0588309 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c | |||
| @@ -496,15 +496,12 @@ nv50_ram_tidy(struct nvkm_ram *base) | |||
| 496 | void | 496 | void |
| 497 | __nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem *mem) | 497 | __nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem *mem) |
| 498 | { | 498 | { |
| 499 | struct nvkm_mm_node *this; | 499 | struct nvkm_mm_node *next = mem->mem; |
| 500 | 500 | struct nvkm_mm_node *node; | |
| 501 | while (!list_empty(&mem->regions)) { | 501 | while ((node = next)) { |
| 502 | this = list_first_entry(&mem->regions, typeof(*this), rl_entry); | 502 | next = node->next; |
| 503 | 503 | nvkm_mm_free(&ram->vram, &node); | |
| 504 | list_del(&this->rl_entry); | ||
| 505 | nvkm_mm_free(&ram->vram, &this); | ||
| 506 | } | 504 | } |
| 507 | |||
| 508 | nvkm_mm_free(&ram->tags, &mem->tag); | 505 | nvkm_mm_free(&ram->tags, &mem->tag); |
| 509 | } | 506 | } |
| 510 | 507 | ||
| @@ -530,7 +527,7 @@ nv50_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin, | |||
| 530 | { | 527 | { |
| 531 | struct nvkm_mm *heap = &ram->vram; | 528 | struct nvkm_mm *heap = &ram->vram; |
| 532 | struct nvkm_mm *tags = &ram->tags; | 529 | struct nvkm_mm *tags = &ram->tags; |
| 533 | struct nvkm_mm_node *r; | 530 | struct nvkm_mm_node **node, *r; |
| 534 | struct nvkm_mem *mem; | 531 | struct nvkm_mem *mem; |
| 535 | int comp = (memtype & 0x300) >> 8; | 532 | int comp = (memtype & 0x300) >> 8; |
| 536 | int type = (memtype & 0x07f); | 533 | int type = (memtype & 0x07f); |
| @@ -559,11 +556,11 @@ nv50_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin, | |||
| 559 | comp = 0; | 556 | comp = 0; |
| 560 | } | 557 | } |
| 561 | 558 | ||
| 562 | INIT_LIST_HEAD(&mem->regions); | ||
| 563 | mem->memtype = (comp << 7) | type; | 559 | mem->memtype = (comp << 7) | type; |
| 564 | mem->size = max; | 560 | mem->size = max; |
| 565 | 561 | ||
| 566 | type = nv50_fb_memtype[type]; | 562 | type = nv50_fb_memtype[type]; |
| 563 | node = &mem->mem; | ||
| 567 | do { | 564 | do { |
| 568 | if (back) | 565 | if (back) |
| 569 | ret = nvkm_mm_tail(heap, 0, type, max, min, align, &r); | 566 | ret = nvkm_mm_tail(heap, 0, type, max, min, align, &r); |
| @@ -575,13 +572,13 @@ nv50_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin, | |||
| 575 | return ret; | 572 | return ret; |
| 576 | } | 573 | } |
| 577 | 574 | ||
| 578 | list_add_tail(&r->rl_entry, &mem->regions); | 575 | *node = r; |
| 576 | node = &r->next; | ||
| 579 | max -= r->length; | 577 | max -= r->length; |
| 580 | } while (max); | 578 | } while (max); |
| 581 | mutex_unlock(&ram->fb->subdev.mutex); | 579 | mutex_unlock(&ram->fb->subdev.mutex); |
| 582 | 580 | ||
| 583 | r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry); | 581 | mem->offset = (u64)mem->mem->offset << NVKM_RAM_MM_SHIFT; |
| 584 | mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT; | ||
| 585 | *pmem = mem; | 582 | *pmem = mem; |
| 586 | return 0; | 583 | return 0; |
| 587 | } | 584 | } |
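__nv50_ram_put() above walks the same chain with the save-next-before-free idiom: nvkm_mm_free() takes a struct nvkm_mm_node ** and clears it, so the `next` link must be captured before the node goes away. The generic shape, with a hypothetical destroy():

    struct node *next = head, *n;
    while ((n = next)) {
            next = n->next;   /* read the link before the node is freed */
            destroy(n);
    }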
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c index f0af2a381eea..fecfa6afcf54 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #include <subdev/bios.h> | 26 | #include <subdev/bios.h> |
| 27 | #include <subdev/bios/extdev.h> | 27 | #include <subdev/bios/extdev.h> |
| 28 | #include <subdev/bios/iccsense.h> | 28 | #include <subdev/bios/iccsense.h> |
| 29 | #include <subdev/bios/power_budget.h> | ||
| 29 | #include <subdev/i2c.h> | 30 | #include <subdev/i2c.h> |
| 30 | 31 | ||
| 31 | static bool | 32 | static bool |
| @@ -216,10 +217,25 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev) | |||
| 216 | { | 217 | { |
| 217 | struct nvkm_iccsense *iccsense = nvkm_iccsense(subdev); | 218 | struct nvkm_iccsense *iccsense = nvkm_iccsense(subdev); |
| 218 | struct nvkm_bios *bios = subdev->device->bios; | 219 | struct nvkm_bios *bios = subdev->device->bios; |
| 220 | struct nvbios_power_budget budget; | ||
| 219 | struct nvbios_iccsense stbl; | 221 | struct nvbios_iccsense stbl; |
| 220 | int i; | 222 | int i, ret; |
| 221 | 223 | ||
| 222 | if (!bios || nvbios_iccsense_parse(bios, &stbl) || !stbl.nr_entry) | 224 | if (!bios) |
| 225 | return 0; | ||
| 226 | |||
| 227 | ret = nvbios_power_budget_header(bios, &budget); | ||
| 228 | if (!ret && budget.cap_entry != 0xff) { | ||
| 229 | struct nvbios_power_budget_entry entry; | ||
| 230 | ret = nvbios_power_budget_entry(bios, &budget, | ||
| 231 | budget.cap_entry, &entry); | ||
| 232 | if (!ret) { | ||
| 233 | iccsense->power_w_max = entry.avg_w; | ||
| 234 | iccsense->power_w_crit = entry.max_w; | ||
| 235 | } | ||
| 236 | } | ||
| 237 | |||
| 238 | if (nvbios_iccsense_parse(bios, &stbl) || !stbl.nr_entry) | ||
| 223 | return 0; | 239 | return 0; |
| 224 | 240 | ||
| 225 | iccsense->data_valid = true; | 241 | iccsense->data_valid = true; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c index a6a7fa0d7679..9dec58ec3d9f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c | |||
| @@ -116,7 +116,7 @@ struct gk20a_instmem { | |||
| 116 | static enum nvkm_memory_target | 116 | static enum nvkm_memory_target |
| 117 | gk20a_instobj_target(struct nvkm_memory *memory) | 117 | gk20a_instobj_target(struct nvkm_memory *memory) |
| 118 | { | 118 | { |
| 119 | return NVKM_MEM_TARGET_HOST; | 119 | return NVKM_MEM_TARGET_NCOH; |
| 120 | } | 120 | } |
| 121 | 121 | ||
| 122 | static u64 | 122 | static u64 |
| @@ -305,11 +305,11 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory) | |||
| 305 | struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory); | 305 | struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory); |
| 306 | struct gk20a_instmem *imem = node->base.imem; | 306 | struct gk20a_instmem *imem = node->base.imem; |
| 307 | struct device *dev = imem->base.subdev.device->dev; | 307 | struct device *dev = imem->base.subdev.device->dev; |
| 308 | struct nvkm_mm_node *r; | 308 | struct nvkm_mm_node *r = node->base.mem.mem; |
| 309 | unsigned long flags; | 309 | unsigned long flags; |
| 310 | int i; | 310 | int i; |
| 311 | 311 | ||
| 312 | if (unlikely(list_empty(&node->base.mem.regions))) | 312 | if (unlikely(!r)) |
| 313 | goto out; | 313 | goto out; |
| 314 | 314 | ||
| 315 | spin_lock_irqsave(&imem->lock, flags); | 315 | spin_lock_irqsave(&imem->lock, flags); |
| @@ -320,9 +320,6 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory) | |||
| 320 | 320 | ||
| 321 | spin_unlock_irqrestore(&imem->lock, flags); | 321 | spin_unlock_irqrestore(&imem->lock, flags); |
| 322 | 322 | ||
| 323 | r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node, | ||
| 324 | rl_entry); | ||
| 325 | |||
| 326 | /* clear IOMMU bit to unmap pages */ | 323 | /* clear IOMMU bit to unmap pages */ |
| 327 | r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift); | 324 | r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift); |
| 328 | 325 | ||
| @@ -404,10 +401,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align, | |||
| 404 | node->r.length = (npages << PAGE_SHIFT) >> 12; | 401 | node->r.length = (npages << PAGE_SHIFT) >> 12; |
| 405 | 402 | ||
| 406 | node->base.mem.offset = node->handle; | 403 | node->base.mem.offset = node->handle; |
| 407 | 404 | node->base.mem.mem = &node->r; | |
| 408 | INIT_LIST_HEAD(&node->base.mem.regions); | ||
| 409 | list_add_tail(&node->r.rl_entry, &node->base.mem.regions); | ||
| 410 | |||
| 411 | return 0; | 405 | return 0; |
| 412 | } | 406 | } |
| 413 | 407 | ||
| @@ -484,10 +478,7 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align, | |||
| 484 | r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift); | 478 | r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift); |
| 485 | 479 | ||
| 486 | node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift; | 480 | node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift; |
| 487 | 481 | node->base.mem.mem = r; | |
| 488 | INIT_LIST_HEAD(&node->base.mem.regions); | ||
| 489 | list_add_tail(&r->rl_entry, &node->base.mem.regions); | ||
| 490 | |||
| 491 | return 0; | 482 | return 0; |
| 492 | 483 | ||
| 493 | release_area: | 484 | release_area: |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c index 6b25e25f9eba..09f669ac6630 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c | |||
| @@ -161,6 +161,16 @@ nvkm_mc_enable(struct nvkm_device *device, enum nvkm_devidx devidx) | |||
| 161 | } | 161 | } |
| 162 | } | 162 | } |
| 163 | 163 | ||
| 164 | bool | ||
| 165 | nvkm_mc_enabled(struct nvkm_device *device, enum nvkm_devidx devidx) | ||
| 166 | { | ||
| 167 | u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx); | ||
| 168 | |||
| 169 | return (pmc_enable != 0) && | ||
| 170 | ((nvkm_rd32(device, 0x000200) & pmc_enable) == pmc_enable); | ||
| 171 | } | ||
| 172 | |||
| 173 | |||
| 164 | static int | 174 | static int |
| 165 | nvkm_mc_fini(struct nvkm_subdev *subdev, bool suspend) | 175 | nvkm_mc_fini(struct nvkm_subdev *subdev, bool suspend) |
| 166 | { | 176 | { |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c index 5df9669ea39c..d06ad2c372bf 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c | |||
| @@ -31,7 +31,7 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node) | |||
| 31 | { | 31 | { |
| 32 | struct nvkm_vm *vm = vma->vm; | 32 | struct nvkm_vm *vm = vma->vm; |
| 33 | struct nvkm_mmu *mmu = vm->mmu; | 33 | struct nvkm_mmu *mmu = vm->mmu; |
| 34 | struct nvkm_mm_node *r; | 34 | struct nvkm_mm_node *r = node->mem; |
| 35 | int big = vma->node->type != mmu->func->spg_shift; | 35 | int big = vma->node->type != mmu->func->spg_shift; |
| 36 | u32 offset = vma->node->offset + (delta >> 12); | 36 | u32 offset = vma->node->offset + (delta >> 12); |
| 37 | u32 bits = vma->node->type - 12; | 37 | u32 bits = vma->node->type - 12; |
| @@ -41,7 +41,7 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node) | |||
| 41 | u32 end, len; | 41 | u32 end, len; |
| 42 | 42 | ||
| 43 | delta = 0; | 43 | delta = 0; |
| 44 | list_for_each_entry(r, &node->regions, rl_entry) { | 44 | while (r) { |
| 45 | u64 phys = (u64)r->offset << 12; | 45 | u64 phys = (u64)r->offset << 12; |
| 46 | u32 num = r->length >> bits; | 46 | u32 num = r->length >> bits; |
| 47 | 47 | ||
| @@ -65,7 +65,8 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node) | |||
| 65 | 65 | ||
| 66 | delta += (u64)len << vma->node->type; | 66 | delta += (u64)len << vma->node->type; |
| 67 | } | 67 | } |
| 68 | } | 68 | r = r->next; |
| 69 | } | ||
| 69 | 70 | ||
| 70 | mmu->func->flush(vm); | 71 | mmu->func->flush(vm); |
| 71 | } | 72 | } |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild index 2a31b7d66a6d..87bf41cef0c6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild | |||
| @@ -6,6 +6,7 @@ nvkm-y += nvkm/subdev/pci/nv40.o | |||
| 6 | nvkm-y += nvkm/subdev/pci/nv46.o | 6 | nvkm-y += nvkm/subdev/pci/nv46.o |
| 7 | nvkm-y += nvkm/subdev/pci/nv4c.o | 7 | nvkm-y += nvkm/subdev/pci/nv4c.o |
| 8 | nvkm-y += nvkm/subdev/pci/g84.o | 8 | nvkm-y += nvkm/subdev/pci/g84.o |
| 9 | nvkm-y += nvkm/subdev/pci/g92.o | ||
| 9 | nvkm-y += nvkm/subdev/pci/g94.o | 10 | nvkm-y += nvkm/subdev/pci/g94.o |
| 10 | nvkm-y += nvkm/subdev/pci/gf100.o | 11 | nvkm-y += nvkm/subdev/pci/gf100.o |
| 11 | nvkm-y += nvkm/subdev/pci/gf106.o | 12 | nvkm-y += nvkm/subdev/pci/gf106.o |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c new file mode 100644 index 000000000000..48874359d5f6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c | |||
| @@ -0,0 +1,57 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2015 Red Hat Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | ||
| 23 | */ | ||
| 24 | #include "priv.h" | ||
| 25 | |||
| 26 | int | ||
| 27 | g92_pcie_version_supported(struct nvkm_pci *pci) | ||
| 28 | { | ||
| 29 | if ((nvkm_pci_rd32(pci, 0x460) & 0x200) == 0x200) | ||
| 30 | return 2; | ||
| 31 | return 1; | ||
| 32 | } | ||
| 33 | |||
| 34 | static const struct nvkm_pci_func | ||
| 35 | g92_pci_func = { | ||
| 36 | .init = g84_pci_init, | ||
| 37 | .rd32 = nv40_pci_rd32, | ||
| 38 | .wr08 = nv40_pci_wr08, | ||
| 39 | .wr32 = nv40_pci_wr32, | ||
| 40 | .msi_rearm = nv46_pci_msi_rearm, | ||
| 41 | |||
| 42 | .pcie.init = g84_pcie_init, | ||
| 43 | .pcie.set_link = g84_pcie_set_link, | ||
| 44 | |||
| 45 | .pcie.max_speed = g84_pcie_max_speed, | ||
| 46 | .pcie.cur_speed = g84_pcie_cur_speed, | ||
| 47 | |||
| 48 | .pcie.set_version = g84_pcie_set_version, | ||
| 49 | .pcie.version = g84_pcie_version, | ||
| 50 | .pcie.version_supported = g92_pcie_version_supported, | ||
| 51 | }; | ||
| 52 | |||
| 53 | int | ||
| 54 | g92_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci) | ||
| 55 | { | ||
| 56 | return nvkm_pci_new_(&g92_pci_func, device, index, ppci); | ||
| 57 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c index 43444123bc04..09adb37a5664 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c | |||
| @@ -23,14 +23,6 @@ | |||
| 23 | */ | 23 | */ |
| 24 | #include "priv.h" | 24 | #include "priv.h" |
| 25 | 25 | ||
| 26 | int | ||
| 27 | g94_pcie_version_supported(struct nvkm_pci *pci) | ||
| 28 | { | ||
| 29 | if ((nvkm_pci_rd32(pci, 0x460) & 0x200) == 0x200) | ||
| 30 | return 2; | ||
| 31 | return 1; | ||
| 32 | } | ||
| 33 | |||
| 34 | static const struct nvkm_pci_func | 26 | static const struct nvkm_pci_func |
| 35 | g94_pci_func = { | 27 | g94_pci_func = { |
| 36 | .init = g84_pci_init, | 28 | .init = g84_pci_init, |
| @@ -47,7 +39,7 @@ g94_pci_func = { | |||
| 47 | 39 | ||
| 48 | .pcie.set_version = g84_pcie_set_version, | 40 | .pcie.set_version = g84_pcie_set_version, |
| 49 | .pcie.version = g84_pcie_version, | 41 | .pcie.version = g84_pcie_version, |
| 50 | .pcie.version_supported = g94_pcie_version_supported, | 42 | .pcie.version_supported = g92_pcie_version_supported, |
| 51 | }; | 43 | }; |
| 52 | 44 | ||
| 53 | int | 45 | int |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c index e30ea676baf6..00a5e7d3ee9d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c | |||
| @@ -92,7 +92,7 @@ gf100_pci_func = { | |||
| 92 | 92 | ||
| 93 | .pcie.set_version = gf100_pcie_set_version, | 93 | .pcie.set_version = gf100_pcie_set_version, |
| 94 | .pcie.version = gf100_pcie_version, | 94 | .pcie.version = gf100_pcie_version, |
| 95 | .pcie.version_supported = g94_pcie_version_supported, | 95 | .pcie.version_supported = g92_pcie_version_supported, |
| 96 | }; | 96 | }; |
| 97 | 97 | ||
| 98 | int | 98 | int |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c index c3b798c5c6dd..11bf419afe3f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c | |||
| @@ -39,7 +39,7 @@ gf106_pci_func = { | |||
| 39 | 39 | ||
| 40 | .pcie.set_version = gf100_pcie_set_version, | 40 | .pcie.set_version = gf100_pcie_set_version, |
| 41 | .pcie.version = gf100_pcie_version, | 41 | .pcie.version = gf100_pcie_version, |
| 42 | .pcie.version_supported = g94_pcie_version_supported, | 42 | .pcie.version_supported = g92_pcie_version_supported, |
| 43 | }; | 43 | }; |
| 44 | 44 | ||
| 45 | int | 45 | int |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h index 23de3180aae5..86921ec962d6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h | |||
| @@ -44,7 +44,7 @@ enum nvkm_pcie_speed g84_pcie_max_speed(struct nvkm_pci *); | |||
| 44 | int g84_pcie_init(struct nvkm_pci *); | 44 | int g84_pcie_init(struct nvkm_pci *); |
| 45 | int g84_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8); | 45 | int g84_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8); |
| 46 | 46 | ||
| 47 | int g94_pcie_version_supported(struct nvkm_pci *); | 47 | int g92_pcie_version_supported(struct nvkm_pci *); |
| 48 | 48 | ||
| 49 | void gf100_pcie_set_version(struct nvkm_pci *, u8); | 49 | void gf100_pcie_set_version(struct nvkm_pci *, u8); |
| 50 | int gf100_pcie_version(struct nvkm_pci *); | 50 | int gf100_pcie_version(struct nvkm_pci *); |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild index 51fb4bf94a44..ca57c1e491b0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild | |||
| @@ -8,5 +8,6 @@ nvkm-y += nvkm/subdev/pmu/gk110.o | |||
| 8 | nvkm-y += nvkm/subdev/pmu/gk208.o | 8 | nvkm-y += nvkm/subdev/pmu/gk208.o |
| 9 | nvkm-y += nvkm/subdev/pmu/gk20a.o | 9 | nvkm-y += nvkm/subdev/pmu/gk20a.o |
| 10 | nvkm-y += nvkm/subdev/pmu/gm107.o | 10 | nvkm-y += nvkm/subdev/pmu/gm107.o |
| 11 | nvkm-y += nvkm/subdev/pmu/gm20b.o | ||
| 11 | nvkm-y += nvkm/subdev/pmu/gp100.o | 12 | nvkm-y += nvkm/subdev/pmu/gp100.o |
| 12 | nvkm-y += nvkm/subdev/pmu/gp102.o | 13 | nvkm-y += nvkm/subdev/pmu/gp102.o |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c index e611ce80f8ef..a73f690eb4b5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c | |||
| @@ -116,6 +116,8 @@ nvkm_pmu_init(struct nvkm_subdev *subdev) | |||
| 116 | static void * | 116 | static void * |
| 117 | nvkm_pmu_dtor(struct nvkm_subdev *subdev) | 117 | nvkm_pmu_dtor(struct nvkm_subdev *subdev) |
| 118 | { | 118 | { |
| 119 | struct nvkm_pmu *pmu = nvkm_pmu(subdev); | ||
| 120 | nvkm_falcon_del(&pmu->falcon); | ||
| 119 | return nvkm_pmu(subdev); | 121 | return nvkm_pmu(subdev); |
| 120 | } | 122 | } |
| 121 | 123 | ||
| @@ -129,15 +131,22 @@ nvkm_pmu = { | |||
| 129 | }; | 131 | }; |
| 130 | 132 | ||
| 131 | int | 133 | int |
| 134 | nvkm_pmu_ctor(const struct nvkm_pmu_func *func, struct nvkm_device *device, | ||
| 135 | int index, struct nvkm_pmu *pmu) | ||
| 136 | { | ||
| 137 | nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev); | ||
| 138 | pmu->func = func; | ||
| 139 | INIT_WORK(&pmu->recv.work, nvkm_pmu_recv); | ||
| 140 | init_waitqueue_head(&pmu->recv.wait); | ||
| 141 | return nvkm_falcon_v1_new(&pmu->subdev, "PMU", 0x10a000, &pmu->falcon); | ||
| 142 | } | ||
| 143 | |||
| 144 | int | ||
| 132 | nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device, | 145 | nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device, |
| 133 | int index, struct nvkm_pmu **ppmu) | 146 | int index, struct nvkm_pmu **ppmu) |
| 134 | { | 147 | { |
| 135 | struct nvkm_pmu *pmu; | 148 | struct nvkm_pmu *pmu; |
| 136 | if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL))) | 149 | if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL))) |
| 137 | return -ENOMEM; | 150 | return -ENOMEM; |
| 138 | nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev); | 151 | return nvkm_pmu_ctor(func, device, index, *ppmu); |
| 139 | pmu->func = func; | ||
| 140 | INIT_WORK(&pmu->recv.work, nvkm_pmu_recv); | ||
| 141 | init_waitqueue_head(&pmu->recv.wait); | ||
| 142 | return 0; | ||
| 143 | } | 152 | } |
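The split above separates allocation from construction: `nvkm_pmu_ctor()` now performs the common initialisation (subdev ctor, work/waitqueue setup, falcon creation) on a caller-provided object, while `nvkm_pmu_new_()` keeps the old allocate-then-construct behaviour. This lets chip code such as gk20a, which embeds `struct nvkm_pmu` inside a larger structure, reuse the common constructor; it is also why `nvkm_pmu_dtor()` now has to release `pmu->falcon`. A minimal standalone sketch of the pattern, using simplified stand-in types rather than the nvkm API:

```c
/* build: cc -Wall ctor_split.c -- illustrative types, not the nvkm API */
#include <stdio.h>
#include <stdlib.h>

struct base { int index; };
struct derived { struct base base; int extra; };  /* embeds the base */

/* Common constructor: initialises a caller-provided object. */
static int base_ctor(int index, struct base *b)
{
	b->index = index;
	return 0;
}

/* Convenience allocator for users that only need the base type. */
static int base_new_(int index, struct base **pb)
{
	if (!(*pb = calloc(1, sizeof(**pb))))
		return -1;                       /* -ENOMEM in the kernel */
	return base_ctor(index, *pb);
}

int main(void)
{
	struct base *b = NULL;
	struct derived d = { .extra = 42 };

	if (base_new_(0, &b))                    /* plain construction */
		return 1;
	base_ctor(1, &d.base);                   /* subclass reuses the ctor */
	printf("%d %d %d\n", b->index, d.base.index, d.extra);
	free(b);
	return 0;
}
```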
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c index f996d90c9f0d..9ca0db796cbe 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c | |||
| @@ -19,7 +19,7 @@ | |||
| 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
| 20 | * DEALINGS IN THE SOFTWARE. | 20 | * DEALINGS IN THE SOFTWARE. |
| 21 | */ | 21 | */ |
| 22 | #define gk20a_pmu(p) container_of((p), struct gk20a_pmu, base.subdev) | 22 | #define gk20a_pmu(p) container_of((p), struct gk20a_pmu, base) |
| 23 | #include "priv.h" | 23 | #include "priv.h" |
| 24 | 24 | ||
| 25 | #include <subdev/clk.h> | 25 | #include <subdev/clk.h> |
| @@ -43,9 +43,8 @@ struct gk20a_pmu { | |||
| 43 | }; | 43 | }; |
| 44 | 44 | ||
| 45 | struct gk20a_pmu_dvfs_dev_status { | 45 | struct gk20a_pmu_dvfs_dev_status { |
| 46 | unsigned long total; | 46 | u32 total; |
| 47 | unsigned long busy; | 47 | u32 busy; |
| 48 | int cur_state; | ||
| 49 | }; | 48 | }; |
| 50 | 49 | ||
| 51 | static int | 50 | static int |
| @@ -56,13 +55,12 @@ gk20a_pmu_dvfs_target(struct gk20a_pmu *pmu, int *state) | |||
| 56 | return nvkm_clk_astate(clk, *state, 0, false); | 55 | return nvkm_clk_astate(clk, *state, 0, false); |
| 57 | } | 56 | } |
| 58 | 57 | ||
| 59 | static int | 58 | static void |
| 60 | gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu *pmu, int *state) | 59 | gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu *pmu, int *state) |
| 61 | { | 60 | { |
| 62 | struct nvkm_clk *clk = pmu->base.subdev.device->clk; | 61 | struct nvkm_clk *clk = pmu->base.subdev.device->clk; |
| 63 | 62 | ||
| 64 | *state = clk->pstate; | 63 | *state = clk->pstate; |
| 65 | return 0; | ||
| 66 | } | 64 | } |
| 67 | 65 | ||
| 68 | static int | 66 | static int |
| @@ -90,28 +88,26 @@ gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu *pmu, | |||
| 90 | 88 | ||
| 91 | *state = level; | 89 | *state = level; |
| 92 | 90 | ||
| 93 | if (level == cur_level) | 91 | return (level != cur_level); |
| 94 | return 0; | ||
| 95 | else | ||
| 96 | return 1; | ||
| 97 | } | 92 | } |
| 98 | 93 | ||
| 99 | static int | 94 | static void |
| 100 | gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu, | 95 | gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu, |
| 101 | struct gk20a_pmu_dvfs_dev_status *status) | 96 | struct gk20a_pmu_dvfs_dev_status *status) |
| 102 | { | 97 | { |
| 103 | struct nvkm_device *device = pmu->base.subdev.device; | 98 | struct nvkm_falcon *falcon = pmu->base.falcon; |
| 104 | status->busy = nvkm_rd32(device, 0x10a508 + (BUSY_SLOT * 0x10)); | 99 | |
| 105 | status->total= nvkm_rd32(device, 0x10a508 + (CLK_SLOT * 0x10)); | 100 | status->busy = nvkm_falcon_rd32(falcon, 0x508 + (BUSY_SLOT * 0x10)); |
| 106 | return 0; | 101 | status->total= nvkm_falcon_rd32(falcon, 0x508 + (CLK_SLOT * 0x10)); |
| 107 | } | 102 | } |
| 108 | 103 | ||
| 109 | static void | 104 | static void |
| 110 | gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu) | 105 | gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu) |
| 111 | { | 106 | { |
| 112 | struct nvkm_device *device = pmu->base.subdev.device; | 107 | struct nvkm_falcon *falcon = pmu->base.falcon; |
| 113 | nvkm_wr32(device, 0x10a508 + (BUSY_SLOT * 0x10), 0x80000000); | 108 | |
| 114 | nvkm_wr32(device, 0x10a508 + (CLK_SLOT * 0x10), 0x80000000); | 109 | nvkm_falcon_wr32(falcon, 0x508 + (BUSY_SLOT * 0x10), 0x80000000); |
| 110 | nvkm_falcon_wr32(falcon, 0x508 + (CLK_SLOT * 0x10), 0x80000000); | ||
| 115 | } | 111 | } |
| 116 | 112 | ||
| 117 | static void | 113 | static void |
| @@ -127,7 +123,7 @@ gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm) | |||
| 127 | struct nvkm_timer *tmr = device->timer; | 123 | struct nvkm_timer *tmr = device->timer; |
| 128 | struct nvkm_volt *volt = device->volt; | 124 | struct nvkm_volt *volt = device->volt; |
| 129 | u32 utilization = 0; | 125 | u32 utilization = 0; |
| 130 | int state, ret; | 126 | int state; |
| 131 | 127 | ||
| 132 | /* | 128 | /* |
| 133 | * The PMU is initialized before CLK and VOLT, so we have to make sure the | 129 | * The PMU is initialized before CLK and VOLT, so we have to make sure the |
| @@ -136,11 +132,7 @@ gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm) | |||
| 136 | if (!clk || !volt) | 132 | if (!clk || !volt) |
| 137 | goto resched; | 133 | goto resched; |
| 138 | 134 | ||
| 139 | ret = gk20a_pmu_dvfs_get_dev_status(pmu, &status); | 135 | gk20a_pmu_dvfs_get_dev_status(pmu, &status); |
| 140 | if (ret) { | ||
| 141 | nvkm_warn(subdev, "failed to get device status\n"); | ||
| 142 | goto resched; | ||
| 143 | } | ||
| 144 | 136 | ||
| 145 | if (status.total) | 137 | if (status.total) |
| 146 | utilization = div_u64((u64)status.busy * 100, status.total); | 138 | utilization = div_u64((u64)status.busy * 100, status.total); |
| @@ -150,11 +142,7 @@ gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm) | |||
| 150 | nvkm_trace(subdev, "utilization = %d %%, avg_load = %d %%\n", | 142 | nvkm_trace(subdev, "utilization = %d %%, avg_load = %d %%\n", |
| 151 | utilization, data->avg_load); | 143 | utilization, data->avg_load); |
| 152 | 144 | ||
| 153 | ret = gk20a_pmu_dvfs_get_cur_state(pmu, &state); | 145 | gk20a_pmu_dvfs_get_cur_state(pmu, &state); |
| 154 | if (ret) { | ||
| 155 | nvkm_warn(subdev, "failed to get current state\n"); | ||
| 156 | goto resched; | ||
| 157 | } | ||
| 158 | 146 | ||
| 159 | if (gk20a_pmu_dvfs_get_target_state(pmu, &state, data->avg_load)) { | 147 | if (gk20a_pmu_dvfs_get_target_state(pmu, &state, data->avg_load)) { |
| 160 | nvkm_trace(subdev, "set new state to %d\n", state); | 148 | nvkm_trace(subdev, "set new state to %d\n", state); |
| @@ -166,32 +154,36 @@ resched: | |||
| 166 | nvkm_timer_alarm(tmr, 100000000, alarm); | 154 | nvkm_timer_alarm(tmr, 100000000, alarm); |
| 167 | } | 155 | } |
| 168 | 156 | ||
| 169 | static int | 157 | static void |
| 170 | gk20a_pmu_fini(struct nvkm_subdev *subdev, bool suspend) | 158 | gk20a_pmu_fini(struct nvkm_pmu *pmu) |
| 171 | { | 159 | { |
| 172 | struct gk20a_pmu *pmu = gk20a_pmu(subdev); | 160 | struct gk20a_pmu *gpmu = gk20a_pmu(pmu); |
| 173 | nvkm_timer_alarm_cancel(subdev->device->timer, &pmu->alarm); | 161 | nvkm_timer_alarm_cancel(pmu->subdev.device->timer, &gpmu->alarm); |
| 174 | return 0; | ||
| 175 | } | ||
| 176 | 162 | ||
| 177 | static void * | 163 | nvkm_falcon_put(pmu->falcon, &pmu->subdev); |
| 178 | gk20a_pmu_dtor(struct nvkm_subdev *subdev) | ||
| 179 | { | ||
| 180 | return gk20a_pmu(subdev); | ||
| 181 | } | 164 | } |
| 182 | 165 | ||
| 183 | static int | 166 | static int |
| 184 | gk20a_pmu_init(struct nvkm_subdev *subdev) | 167 | gk20a_pmu_init(struct nvkm_pmu *pmu) |
| 185 | { | 168 | { |
| 186 | struct gk20a_pmu *pmu = gk20a_pmu(subdev); | 169 | struct gk20a_pmu *gpmu = gk20a_pmu(pmu); |
| 187 | struct nvkm_device *device = pmu->base.subdev.device; | 170 | struct nvkm_subdev *subdev = &pmu->subdev; |
| 171 | struct nvkm_device *device = pmu->subdev.device; | ||
| 172 | struct nvkm_falcon *falcon = pmu->falcon; | ||
| 173 | int ret; | ||
| 174 | |||
| 175 | ret = nvkm_falcon_get(falcon, subdev); | ||
| 176 | if (ret) { | ||
| 177 | nvkm_error(subdev, "cannot acquire %s falcon!\n", falcon->name); | ||
| 178 | return ret; | ||
| 179 | } | ||
| 188 | 180 | ||
| 189 | /* init pwr perf counter */ | 181 | /* init pwr perf counter */ |
| 190 | nvkm_wr32(device, 0x10a504 + (BUSY_SLOT * 0x10), 0x00200001); | 182 | nvkm_falcon_wr32(falcon, 0x504 + (BUSY_SLOT * 0x10), 0x00200001); |
| 191 | nvkm_wr32(device, 0x10a50c + (BUSY_SLOT * 0x10), 0x00000002); | 183 | nvkm_falcon_wr32(falcon, 0x50c + (BUSY_SLOT * 0x10), 0x00000002); |
| 192 | nvkm_wr32(device, 0x10a50c + (CLK_SLOT * 0x10), 0x00000003); | 184 | nvkm_falcon_wr32(falcon, 0x50c + (CLK_SLOT * 0x10), 0x00000003); |
| 193 | 185 | ||
| 194 | nvkm_timer_alarm(device->timer, 2000000000, &pmu->alarm); | 186 | nvkm_timer_alarm(device->timer, 2000000000, &gpmu->alarm); |
| 195 | return 0; | 187 | return 0; |
| 196 | } | 188 | } |
| 197 | 189 | ||
| @@ -202,26 +194,26 @@ gk20a_dvfs_data= { | |||
| 202 | .p_smooth = 1, | 194 | .p_smooth = 1, |
| 203 | }; | 195 | }; |
| 204 | 196 | ||
| 205 | static const struct nvkm_subdev_func | 197 | static const struct nvkm_pmu_func |
| 206 | gk20a_pmu = { | 198 | gk20a_pmu = { |
| 207 | .init = gk20a_pmu_init, | 199 | .init = gk20a_pmu_init, |
| 208 | .fini = gk20a_pmu_fini, | 200 | .fini = gk20a_pmu_fini, |
| 209 | .dtor = gk20a_pmu_dtor, | 201 | .reset = gt215_pmu_reset, |
| 210 | }; | 202 | }; |
| 211 | 203 | ||
| 212 | int | 204 | int |
| 213 | gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) | 205 | gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) |
| 214 | { | 206 | { |
| 215 | static const struct nvkm_pmu_func func = {}; | ||
| 216 | struct gk20a_pmu *pmu; | 207 | struct gk20a_pmu *pmu; |
| 217 | 208 | ||
| 218 | if (!(pmu = kzalloc(sizeof(*pmu), GFP_KERNEL))) | 209 | if (!(pmu = kzalloc(sizeof(*pmu), GFP_KERNEL))) |
| 219 | return -ENOMEM; | 210 | return -ENOMEM; |
| 220 | pmu->base.func = &func; | ||
| 221 | *ppmu = &pmu->base; | 211 | *ppmu = &pmu->base; |
| 222 | 212 | ||
| 223 | nvkm_subdev_ctor(&gk20a_pmu, device, index, &pmu->base.subdev); | 213 | nvkm_pmu_ctor(&gk20a_pmu, device, index, &pmu->base); |
| 214 | |||
| 224 | pmu->data = &gk20a_dvfs_data; | 215 | pmu->data = &gk20a_dvfs_data; |
| 225 | nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work); | 216 | nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work); |
| 217 | |||
| 226 | return 0; | 218 | return 0; |
| 227 | } | 219 | } |
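The gk20a DVFS worker derives load from two PMU performance counters, a busy-cycle slot and a total-cycle slot, each rearmed by writing 0x80000000 and now accessed through the falcon helpers at the 0x508/0x50c offsets. The percentage itself comes from `div_u64((u64)status.busy * 100, status.total)`; a standalone sketch of that computation (the function name is illustrative):

```c
/* build: cc -Wall utilization.c */
#include <stdint.h>
#include <stdio.h>

static uint32_t dvfs_utilization(uint32_t busy, uint32_t total)
{
	if (!total)
		return 0;        /* the driver likewise skips a zero total */
	/* widen before multiplying so busy * 100 cannot overflow 32 bits */
	return (uint32_t)(((uint64_t)busy * 100) / total);
}

int main(void)
{
	/* counters large enough that a 32-bit busy * 100 would overflow */
	printf("%u%%\n", dvfs_utilization(3000000000u, 4000000000u)); /* 75% */
	return 0;
}
```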
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c new file mode 100644 index 000000000000..0b8a1cc4a0ee --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c | |||
| @@ -0,0 +1,34 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 20 | * DEALINGS IN THE SOFTWARE. | ||
| 21 | */ | ||
| 22 | |||
| 23 | #include "priv.h" | ||
| 24 | |||
| 25 | static const struct nvkm_pmu_func | ||
| 26 | gm20b_pmu = { | ||
| 27 | .reset = gt215_pmu_reset, | ||
| 28 | }; | ||
| 29 | |||
| 30 | int | ||
| 31 | gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) | ||
| 32 | { | ||
| 33 | return nvkm_pmu_new_(&gm20b_pmu, device, index, ppmu); | ||
| 34 | } | ||
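gm20b gets by with a func table that only fills in `.reset`, reusing `gt215_pmu_reset` and leaving everything else to the shared PMU code. This sparse per-chip function-table pattern is how nvkm variants are typically added; a hedged standalone sketch of the same idea with stand-in types:

```c
/* build: cc -Wall func_table.c -- stand-in types, not the nvkm API */
#include <stdio.h>

/* Per-chip hook table (analogous to struct nvkm_pmu_func). */
struct chip_func {
	void (*init)(void);
	void (*reset)(void);
};

static void shared_reset(void) { puts("shared reset"); }

/* Minimal variant: borrow the shared reset, omit the optional hooks. */
static const struct chip_func gm20b_like = {
	.reset = shared_reset,
};

int main(void)
{
	if (gm20b_like.init)          /* unset hooks stay NULL and are skipped */
		gm20b_like.init();
	if (gm20b_like.reset)
		gm20b_like.reset();
	return 0;
}
```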
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h index 2e2179a4ad17..096cba069f72 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h | |||
| @@ -4,6 +4,8 @@ | |||
| 4 | #include <subdev/pmu.h> | 4 | #include <subdev/pmu.h> |
| 5 | #include <subdev/pmu/fuc/os.h> | 5 | #include <subdev/pmu/fuc/os.h> |
| 6 | 6 | ||
| 7 | int nvkm_pmu_ctor(const struct nvkm_pmu_func *, struct nvkm_device *, | ||
| 8 | int index, struct nvkm_pmu *); | ||
| 7 | int nvkm_pmu_new_(const struct nvkm_pmu_func *, struct nvkm_device *, | 9 | int nvkm_pmu_new_(const struct nvkm_pmu_func *, struct nvkm_device *, |
| 8 | int index, struct nvkm_pmu **); | 10 | int index, struct nvkm_pmu **); |
| 9 | 11 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild index b02b868a6589..5076d1500f47 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild | |||
| @@ -1,3 +1,7 @@ | |||
| 1 | nvkm-y += nvkm/subdev/secboot/base.o | 1 | nvkm-y += nvkm/subdev/secboot/base.o |
| 2 | nvkm-y += nvkm/subdev/secboot/ls_ucode_gr.o | ||
| 3 | nvkm-y += nvkm/subdev/secboot/acr.o | ||
| 4 | nvkm-y += nvkm/subdev/secboot/acr_r352.o | ||
| 5 | nvkm-y += nvkm/subdev/secboot/acr_r361.o | ||
| 2 | nvkm-y += nvkm/subdev/secboot/gm200.o | 6 | nvkm-y += nvkm/subdev/secboot/gm200.o |
| 3 | nvkm-y += nvkm/subdev/secboot/gm20b.o | 7 | nvkm-y += nvkm/subdev/secboot/gm20b.o |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c new file mode 100644 index 000000000000..75dc06557877 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c | |||
| @@ -0,0 +1,54 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 20 | * DEALINGS IN THE SOFTWARE. | ||
| 21 | */ | ||
| 22 | |||
| 23 | #include "acr.h" | ||
| 24 | |||
| 25 | #include <core/firmware.h> | ||
| 26 | |||
| 27 | /** | ||
| 28 | * Convenience function to duplicate a firmware file in memory and check that | ||
| 29 | * it has the required minimum size. | ||
| 30 | */ | ||
| 31 | void * | ||
| 32 | nvkm_acr_load_firmware(const struct nvkm_subdev *subdev, const char *name, | ||
| 33 | size_t min_size) | ||
| 34 | { | ||
| 35 | const struct firmware *fw; | ||
| 36 | void *blob; | ||
| 37 | int ret; | ||
| 38 | |||
| 39 | ret = nvkm_firmware_get(subdev->device, name, &fw); | ||
| 40 | if (ret) | ||
| 41 | return ERR_PTR(ret); | ||
| 42 | if (fw->size < min_size) { | ||
| 43 | nvkm_error(subdev, "%s is smaller than expected size %zu\n", | ||
| 44 | name, min_size); | ||
| 45 | nvkm_firmware_put(fw); | ||
| 46 | return ERR_PTR(-EINVAL); | ||
| 47 | } | ||
| 48 | blob = kmemdup(fw->data, fw->size, GFP_KERNEL); | ||
| 49 | nvkm_firmware_put(fw); | ||
| 50 | if (!blob) | ||
| 51 | return ERR_PTR(-ENOMEM); | ||
| 52 | |||
| 53 | return blob; | ||
| 54 | } | ||
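`nvkm_acr_load_firmware()` reports failure through the returned pointer itself, so callers check the result with `IS_ERR()`/`PTR_ERR()` and later `kfree()` the duplicated blob. A standalone sketch of that ERR_PTR convention around a simplified stand-in for the helper (a userspace re-implementation, not the kernel code):

```c
/* build: cc -Wall errptr.c */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ERRNO 4095
static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for nvkm_acr_load_firmware(): duplicate a buffer, enforcing
 * a minimum size, and report errors through the returned pointer. */
static void *load_blob(const void *data, size_t size, size_t min_size)
{
	void *blob;

	if (size < min_size)
		return ERR_PTR(-EINVAL);
	if (!(blob = malloc(size)))
		return ERR_PTR(-ENOMEM);
	return memcpy(blob, data, size);
}

int main(void)
{
	void *blob = load_blob("fw", 2, 16);     /* too small on purpose */

	if (IS_ERR(blob)) {
		printf("load failed: %ld\n", PTR_ERR(blob));  /* -22 */
		return 1;
	}
	free(blob);
	return 0;
}
```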
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h new file mode 100644 index 000000000000..97795b342b6f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h | |||
| @@ -0,0 +1,69 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 20 | * DEALINGS IN THE SOFTWARE. | ||
| 21 | */ | ||
| 22 | #ifndef __NVKM_SECBOOT_ACR_H__ | ||
| 23 | #define __NVKM_SECBOOT_ACR_H__ | ||
| 24 | |||
| 25 | #include "priv.h" | ||
| 26 | |||
| 27 | struct nvkm_acr; | ||
| 28 | |||
| 29 | /** | ||
| 30 | * struct nvkm_acr_func - properties and functions specific to an ACR | ||
| 31 | * | ||
| 32 | * @load: make the ACR ready to run on the given secboot device | ||
| 33 | * @reset: reset the specified falcon | ||
| 34 | * @start: start the specified falcon (assumed to have been reset) | ||
| 35 | */ | ||
| 36 | struct nvkm_acr_func { | ||
| 37 | void (*dtor)(struct nvkm_acr *); | ||
| 38 | int (*oneinit)(struct nvkm_acr *, struct nvkm_secboot *); | ||
| 39 | int (*fini)(struct nvkm_acr *, struct nvkm_secboot *, bool); | ||
| 40 | int (*load)(struct nvkm_acr *, struct nvkm_secboot *, | ||
| 41 | struct nvkm_gpuobj *, u64); | ||
| 42 | int (*reset)(struct nvkm_acr *, struct nvkm_secboot *, | ||
| 43 | enum nvkm_secboot_falcon); | ||
| 44 | int (*start)(struct nvkm_acr *, struct nvkm_secboot *, | ||
| 45 | enum nvkm_secboot_falcon); | ||
| 46 | }; | ||
| 47 | |||
| 48 | /** | ||
| 49 | * struct nvkm_acr - instance of an ACR | ||
| 50 | * | ||
| 51 | * @boot_falcon: ID of the falcon that will perform secure boot | ||
| 52 | * @managed_falcons: bitfield of falcons managed by this ACR | ||
| 53 | * @start_address: virtual start address of the HS bootloader | ||
| 54 | */ | ||
| 55 | struct nvkm_acr { | ||
| 56 | const struct nvkm_acr_func *func; | ||
| 57 | const struct nvkm_subdev *subdev; | ||
| 58 | |||
| 59 | enum nvkm_secboot_falcon boot_falcon; | ||
| 60 | unsigned long managed_falcons; | ||
| 61 | u32 start_address; | ||
| 62 | }; | ||
| 63 | |||
| 64 | void *nvkm_acr_load_firmware(const struct nvkm_subdev *, const char *, size_t); | ||
| 65 | |||
| 66 | struct nvkm_acr *acr_r352_new(unsigned long); | ||
| 67 | struct nvkm_acr *acr_r361_new(unsigned long); | ||
| 68 | |||
| 69 | #endif | ||
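`managed_falcons` is a plain bitfield indexed by `enum nvkm_secboot_falcon`, which is why constructors like `acr_r352_new()` take a single `unsigned long` naming every falcon the ACR should bootstrap. A standalone sketch of building and walking such a mask (the enum values are illustrative, not the real falcon IDs):

```c
/* build: cc -Wall falcon_mask.c */
#include <stdio.h>

enum falcon { FALCON_PMU, FALCON_FECS, FALCON_GPCCS, FALCON_END };
#define BIT(n) (1UL << (n))

int main(void)
{
	/* bitfield of managed falcons, as in nvkm_acr.managed_falcons */
	unsigned long managed = BIT(FALCON_FECS) | BIT(FALCON_GPCCS);
	int id;

	for (id = 0; id < FALCON_END; id++)
		if (managed & BIT(id))
			printf("falcon %d is ACR-managed\n", id);
	return 0;
}
```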
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c new file mode 100644 index 000000000000..1aa37ea18580 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c | |||
| @@ -0,0 +1,936 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 20 | * DEALINGS IN THE SOFTWARE. | ||
| 21 | */ | ||
| 22 | |||
| 23 | #include "acr_r352.h" | ||
| 24 | |||
| 25 | #include <core/gpuobj.h> | ||
| 26 | #include <core/firmware.h> | ||
| 27 | #include <engine/falcon.h> | ||
| 28 | |||
| 29 | /** | ||
| 30 | * struct hsf_fw_header - HS firmware descriptor | ||
| 31 | * @sig_dbg_offset: offset of the debug signature | ||
| 32 | * @sig_dbg_size: size of the debug signature | ||
| 33 | * @sig_prod_offset: offset of the production signature | ||
| 34 | * @sig_prod_size: size of the production signature | ||
| 35 | * @patch_loc: offset of the offset (sic) of where the signature is | ||
| 36 | * @patch_sig: offset of the offset (sic) to add to sig_*_offset | ||
| 37 | * @hdr_offset: offset of the load header (see struct hs_load_header) | ||
| 38 | * @hdr_size: size of above header | ||
| 39 | * | ||
| 40 | * This structure is embedded in the HS firmware image at | ||
| 41 | * hs_bin_hdr.header_offset. | ||
| 42 | */ | ||
| 43 | struct hsf_fw_header { | ||
| 44 | u32 sig_dbg_offset; | ||
| 45 | u32 sig_dbg_size; | ||
| 46 | u32 sig_prod_offset; | ||
| 47 | u32 sig_prod_size; | ||
| 48 | u32 patch_loc; | ||
| 49 | u32 patch_sig; | ||
| 50 | u32 hdr_offset; | ||
| 51 | u32 hdr_size; | ||
| 52 | }; | ||
| 53 | |||
| 54 | /** | ||
| 55 | * struct acr_r352_flcn_bl_desc - DMEM bootloader descriptor | ||
| 56 | * @signature: 16B signature for secure code. 0s if no secure code | ||
| 57 | * @ctx_dma: DMA context to be used by BL while loading code/data | ||
| 58 | * @code_dma_base: 256B-aligned Physical FB Address where code is located | ||
| 59 | * (falcon's $xcbase register) | ||
| 60 | * @non_sec_code_off: offset from code_dma_base where the non-secure code is | ||
| 61 | * located. The offset must be multiple of 256 to help perf | ||
| 62 | * @non_sec_code_size: the size of the nonSecure code part. | ||
| 63 | * @sec_code_off: offset from code_dma_base where the secure code is | ||
| 64 | * located. The offset must be multiple of 256 to help perf | ||
| 65 | * @sec_code_size: size of the secure code part. The size must be a | ||
| 66 | * multiple of 256 to help perf | ||
| 67 | * @code_entry_point: code entry point which will be invoked by BL after | ||
| 68 | * code is loaded. | ||
| 69 | * @data_dma_base: 256B aligned Physical FB Address where data is located. | ||
| 70 | * (falcon's $xdbase register) | ||
| 71 | * @data_size: size of data block. Should be multiple of 256B | ||
| 72 | * | ||
| 73 | * Structure used by the bootloader to load the rest of the code. This has | ||
| 74 | * to be filled by host and copied into DMEM at offset provided in the | ||
| 75 | * hsflcn_bl_desc.bl_desc_dmem_load_off. | ||
| 76 | */ | ||
| 77 | struct acr_r352_flcn_bl_desc { | ||
| 78 | u32 reserved[4]; | ||
| 79 | u32 signature[4]; | ||
| 80 | u32 ctx_dma; | ||
| 81 | u32 code_dma_base; | ||
| 82 | u32 non_sec_code_off; | ||
| 83 | u32 non_sec_code_size; | ||
| 84 | u32 sec_code_off; | ||
| 85 | u32 sec_code_size; | ||
| 86 | u32 code_entry_point; | ||
| 87 | u32 data_dma_base; | ||
| 88 | u32 data_size; | ||
| 89 | u32 code_dma_base1; | ||
| 90 | u32 data_dma_base1; | ||
| 91 | }; | ||
| 92 | |||
| 93 | /** | ||
| 94 | * acr_r352_generate_flcn_bl_desc - generate generic BL descriptor for LS image | ||
| 95 | */ | ||
| 96 | static void | ||
| 97 | acr_r352_generate_flcn_bl_desc(const struct nvkm_acr *acr, | ||
| 98 | const struct ls_ucode_img *_img, u64 wpr_addr, | ||
| 99 | void *_desc) | ||
| 100 | { | ||
| 101 | struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img); | ||
| 102 | struct acr_r352_flcn_bl_desc *desc = _desc; | ||
| 103 | const struct ls_ucode_img_desc *pdesc = &_img->ucode_desc; | ||
| 104 | u64 base, addr_code, addr_data; | ||
| 105 | |||
| 106 | base = wpr_addr + img->lsb_header.ucode_off + pdesc->app_start_offset; | ||
| 107 | addr_code = (base + pdesc->app_resident_code_offset) >> 8; | ||
| 108 | addr_data = (base + pdesc->app_resident_data_offset) >> 8; | ||
| 109 | |||
| 110 | desc->ctx_dma = FALCON_DMAIDX_UCODE; | ||
| 111 | desc->code_dma_base = lower_32_bits(addr_code); | ||
| 112 | desc->code_dma_base1 = upper_32_bits(addr_code); | ||
| 113 | desc->non_sec_code_off = pdesc->app_resident_code_offset; | ||
| 114 | desc->non_sec_code_size = pdesc->app_resident_code_size; | ||
| 115 | desc->code_entry_point = pdesc->app_imem_entry; | ||
| 116 | desc->data_dma_base = lower_32_bits(addr_data); | ||
| 117 | desc->data_dma_base1 = upper_32_bits(addr_data); | ||
| 118 | desc->data_size = pdesc->app_resident_data_size; | ||
| 119 | } | ||
| 120 | |||
| 121 | |||
| 122 | /** | ||
| 123 | * struct hsflcn_acr_desc - data section of the HS firmware | ||
| 124 | * | ||
| 125 | * This header is to be copied at the beginning of DMEM by the HS bootloader. | ||
| 126 | * | ||
| 127 | * @signature: signature of ACR ucode | ||
| 128 | * @wpr_region_id: region ID holding the WPR header and its details | ||
| 129 | * @wpr_offset: offset from the WPR region holding the wpr header | ||
| 130 | * @regions: region descriptors | ||
| 131 | * @ucode_blob_size: size of the LS blob | ||
| 132 | * @ucode_blob_base: FB location of the LS blob | ||
| 133 | */ | ||
| 134 | struct hsflcn_acr_desc { | ||
| 135 | union { | ||
| 136 | u8 reserved_dmem[0x200]; | ||
| 137 | u32 signatures[4]; | ||
| 138 | } ucode_reserved_space; | ||
| 139 | u32 wpr_region_id; | ||
| 140 | u32 wpr_offset; | ||
| 141 | u32 mmu_mem_range; | ||
| 142 | #define FLCN_ACR_MAX_REGIONS 2 | ||
| 143 | struct { | ||
| 144 | u32 no_regions; | ||
| 145 | struct { | ||
| 146 | u32 start_addr; | ||
| 147 | u32 end_addr; | ||
| 148 | u32 region_id; | ||
| 149 | u32 read_mask; | ||
| 150 | u32 write_mask; | ||
| 151 | u32 client_mask; | ||
| 152 | } region_props[FLCN_ACR_MAX_REGIONS]; | ||
| 153 | } regions; | ||
| 154 | u32 ucode_blob_size; | ||
| 155 | u64 ucode_blob_base __aligned(8); | ||
| 156 | struct { | ||
| 157 | u32 vpr_enabled; | ||
| 158 | u32 vpr_start; | ||
| 159 | u32 vpr_end; | ||
| 160 | u32 hdcp_policies; | ||
| 161 | } vpr_desc; | ||
| 162 | }; | ||
| 163 | |||
| 164 | |||
| 165 | /* | ||
| 166 | * Low-secure blob creation | ||
| 167 | */ | ||
| 168 | |||
| 169 | /** | ||
| 170 | * ls_ucode_img_load() - create a lsf_ucode_img and load it | ||
| 171 | */ | ||
| 172 | struct ls_ucode_img * | ||
| 173 | acr_r352_ls_ucode_img_load(const struct acr_r352 *acr, | ||
| 174 | enum nvkm_secboot_falcon falcon_id) | ||
| 175 | { | ||
| 176 | const struct nvkm_subdev *subdev = acr->base.subdev; | ||
| 177 | struct ls_ucode_img_r352 *img; | ||
| 178 | int ret; | ||
| 179 | |||
| 180 | img = kzalloc(sizeof(*img), GFP_KERNEL); | ||
| 181 | if (!img) | ||
| 182 | return ERR_PTR(-ENOMEM); | ||
| 183 | |||
| 184 | img->base.falcon_id = falcon_id; | ||
| 185 | |||
| 186 | ret = acr->func->ls_func[falcon_id]->load(subdev, &img->base); | ||
| 187 | |||
| 188 | if (ret) { | ||
| 189 | kfree(img->base.ucode_data); | ||
| 190 | kfree(img->base.sig); | ||
| 191 | kfree(img); | ||
| 192 | return ERR_PTR(ret); | ||
| 193 | } | ||
| 194 | |||
| 195 | /* Check that the signature size matches our expectations... */ | ||
| 196 | if (img->base.sig_size != sizeof(img->lsb_header.signature)) { | ||
| 197 | nvkm_error(subdev, "invalid signature size for %s falcon!\n", | ||
| 198 | nvkm_secboot_falcon_name[falcon_id]); | ||
| 199 | return ERR_PTR(-EINVAL); | ||
| 200 | } | ||
| 201 | |||
| 202 | /* Copy signature to the right place */ | ||
| 203 | memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size); | ||
| 204 | |||
| 205 | /* not needed? the signature should already have the right value */ | ||
| 206 | img->lsb_header.signature.falcon_id = falcon_id; | ||
| 207 | |||
| 208 | return &img->base; | ||
| 209 | } | ||
| 210 | |||
| 211 | #define LSF_LSB_HEADER_ALIGN 256 | ||
| 212 | #define LSF_BL_DATA_ALIGN 256 | ||
| 213 | #define LSF_BL_DATA_SIZE_ALIGN 256 | ||
| 214 | #define LSF_BL_CODE_SIZE_ALIGN 256 | ||
| 215 | #define LSF_UCODE_DATA_ALIGN 4096 | ||
| 216 | |||
| 217 | /** | ||
| 218 | * acr_r352_ls_img_fill_headers - fill the WPR and LSB headers of an image | ||
| 219 | * @acr: ACR to use | ||
| 220 | * @img: image to generate for | ||
| 221 | * @offset: offset in the WPR region where this image starts | ||
| 222 | * | ||
| 223 | * Allocate space in the WPR area from offset and write the WPR and LSB headers | ||
| 224 | * accordingly. | ||
| 225 | * | ||
| 226 | * Return: offset at the end of this image. | ||
| 227 | */ | ||
| 228 | static u32 | ||
| 229 | acr_r352_ls_img_fill_headers(struct acr_r352 *acr, | ||
| 230 | struct ls_ucode_img_r352 *img, u32 offset) | ||
| 231 | { | ||
| 232 | struct ls_ucode_img *_img = &img->base; | ||
| 233 | struct acr_r352_lsf_wpr_header *whdr = &img->wpr_header; | ||
| 234 | struct acr_r352_lsf_lsb_header *lhdr = &img->lsb_header; | ||
| 235 | struct ls_ucode_img_desc *desc = &_img->ucode_desc; | ||
| 236 | const struct acr_r352_ls_func *func = | ||
| 237 | acr->func->ls_func[_img->falcon_id]; | ||
| 238 | |||
| 239 | /* Fill WPR header */ | ||
| 240 | whdr->falcon_id = _img->falcon_id; | ||
| 241 | whdr->bootstrap_owner = acr->base.boot_falcon; | ||
| 242 | whdr->status = LSF_IMAGE_STATUS_COPY; | ||
| 243 | |||
| 244 | /* Skip bootstrapping falcons started by someone else than ACR */ | ||
| 245 | if (acr->lazy_bootstrap & BIT(_img->falcon_id)) | ||
| 246 | whdr->lazy_bootstrap = 1; | ||
| 247 | |||
| 248 | /* Align, save off, and include an LSB header size */ | ||
| 249 | offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN); | ||
| 250 | whdr->lsb_offset = offset; | ||
| 251 | offset += sizeof(*lhdr); | ||
| 252 | |||
| 253 | /* | ||
| 254 | * Align, save off, and include the original (static) ucode | ||
| 255 | * image size | ||
| 256 | */ | ||
| 257 | offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN); | ||
| 258 | lhdr->ucode_off = offset; | ||
| 259 | offset += _img->ucode_size; | ||
| 260 | |||
| 261 | /* | ||
| 262 | * For falcons that use a boot loader (BL), we append a loader | ||
| 263 | * desc structure on the end of the ucode image and consider | ||
| 264 | * this the boot loader data. The host will then copy the loader | ||
| 265 | * desc args to this space within the WPR region (before locking | ||
| 266 | * down) and the HS bin will then copy them to DMEM 0 for the | ||
| 267 | * loader. | ||
| 268 | */ | ||
| 269 | lhdr->bl_code_size = ALIGN(desc->bootloader_size, | ||
| 270 | LSF_BL_CODE_SIZE_ALIGN); | ||
| 271 | lhdr->ucode_size = ALIGN(desc->app_resident_data_offset, | ||
| 272 | LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size; | ||
| 273 | lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) + | ||
| 274 | lhdr->bl_code_size - lhdr->ucode_size; | ||
| 275 | /* | ||
| 276 | * Though the BL is located at 0th offset of the image, the VA | ||
| 277 | * is different to make sure that it doesn't collide with the | ||
| 278 | * actual OS VA range | ||
| 279 | */ | ||
| 280 | lhdr->bl_imem_off = desc->bootloader_imem_offset; | ||
| 281 | lhdr->app_code_off = desc->app_start_offset + | ||
| 282 | desc->app_resident_code_offset; | ||
| 283 | lhdr->app_code_size = desc->app_resident_code_size; | ||
| 284 | lhdr->app_data_off = desc->app_start_offset + | ||
| 285 | desc->app_resident_data_offset; | ||
| 286 | lhdr->app_data_size = desc->app_resident_data_size; | ||
| 287 | |||
| 288 | lhdr->flags = func->lhdr_flags; | ||
| 289 | if (_img->falcon_id == acr->base.boot_falcon) | ||
| 290 | lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX; | ||
| 291 | |||
| 292 | /* Align and save off BL descriptor size */ | ||
| 293 | lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN); | ||
| 294 | |||
| 295 | /* | ||
| 296 | * Align, save off, and include the additional BL data | ||
| 297 | */ | ||
| 298 | offset = ALIGN(offset, LSF_BL_DATA_ALIGN); | ||
| 299 | lhdr->bl_data_off = offset; | ||
| 300 | offset += lhdr->bl_data_size; | ||
| 301 | |||
| 302 | return offset; | ||
| 303 | } | ||
| 304 | |||
| 305 | /** | ||
| 306 | * acr_r352_ls_fill_headers - fill WPR and LSB headers of all managed images | ||
| 307 | */ | ||
| 308 | int | ||
| 309 | acr_r352_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs) | ||
| 310 | { | ||
| 311 | struct ls_ucode_img_r352 *img; | ||
| 312 | struct list_head *l; | ||
| 313 | u32 count = 0; | ||
| 314 | u32 offset; | ||
| 315 | |||
| 316 | /* Count the number of images to manage */ | ||
| 317 | list_for_each(l, imgs) | ||
| 318 | count++; | ||
| 319 | |||
| 320 | /* | ||
| 321 | * Start with an array of WPR headers at the base of the WPR. | ||
| 322 | * The expectation here is that the secure falcon will do a single DMA | ||
| 323 | * read of this array and cache it internally so it's ok to pack these. | ||
| 324 | * Also, we add 1 to the falcon count to indicate the end of the array. | ||
| 325 | */ | ||
| 326 | offset = sizeof(img->wpr_header) * (count + 1); | ||
| 327 | |||
| 328 | /* | ||
| 329 | * Walk the managed falcons, accounting for the LSB structs | ||
| 330 | * as well as the ucode images. | ||
| 331 | */ | ||
| 332 | list_for_each_entry(img, imgs, base.node) { | ||
| 333 | offset = acr_r352_ls_img_fill_headers(acr, img, offset); | ||
| 334 | } | ||
| 335 | |||
| 336 | return offset; | ||
| 337 | } | ||
| 338 | |||
| 339 | /** | ||
| 340 | * acr_r352_ls_write_wpr - write the WPR blob contents | ||
| 341 | */ | ||
| 342 | int | ||
| 343 | acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs, | ||
| 344 | struct nvkm_gpuobj *wpr_blob, u32 wpr_addr) | ||
| 345 | { | ||
| 346 | struct ls_ucode_img *_img; | ||
| 347 | u32 pos = 0; | ||
| 348 | |||
| 349 | nvkm_kmap(wpr_blob); | ||
| 350 | |||
| 351 | list_for_each_entry(_img, imgs, node) { | ||
| 352 | struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img); | ||
| 353 | const struct acr_r352_ls_func *ls_func = | ||
| 354 | acr->func->ls_func[_img->falcon_id]; | ||
| 355 | u8 gdesc[ls_func->bl_desc_size]; | ||
| 356 | |||
| 357 | nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header, | ||
| 358 | sizeof(img->wpr_header)); | ||
| 359 | |||
| 360 | nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset, | ||
| 361 | &img->lsb_header, sizeof(img->lsb_header)); | ||
| 362 | |||
| 363 | /* Generate and write BL descriptor */ | ||
| 364 | memset(gdesc, 0, ls_func->bl_desc_size); | ||
| 365 | ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc); | ||
| 366 | |||
| 367 | nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off, | ||
| 368 | gdesc, ls_func->bl_desc_size); | ||
| 369 | |||
| 370 | /* Copy ucode */ | ||
| 371 | nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off, | ||
| 372 | _img->ucode_data, _img->ucode_size); | ||
| 373 | |||
| 374 | pos += sizeof(img->wpr_header); | ||
| 375 | } | ||
| 376 | |||
| 377 | nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID); | ||
| 378 | |||
| 379 | nvkm_done(wpr_blob); | ||
| 380 | |||
| 381 | return 0; | ||
| 382 | } | ||
| 383 | |||
| 384 | /* Both size and address of WPR need to be 128K-aligned */ | ||
| 385 | #define WPR_ALIGNMENT 0x20000 | ||
| 386 | /** | ||
| 387 | * acr_r352_prepare_ls_blob() - prepare the LS blob | ||
| 388 | * | ||
| 389 | * For each securely managed falcon, load the FW, signatures and bootloaders and | ||
| 390 | * prepare a ucode blob. Then, compute the offsets in the WPR region for each | ||
| 391 | * blob, and finally write the headers and ucode blobs into a GPU object that | ||
| 392 | * will be copied into the WPR region by the HS firmware. | ||
| 393 | */ | ||
| 394 | static int | ||
| 395 | acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size) | ||
| 396 | { | ||
| 397 | const struct nvkm_subdev *subdev = acr->base.subdev; | ||
| 398 | struct list_head imgs; | ||
| 399 | struct ls_ucode_img *img, *t; | ||
| 400 | unsigned long managed_falcons = acr->base.managed_falcons; | ||
| 401 | int managed_count = 0; | ||
| 402 | u32 image_wpr_size; | ||
| 403 | int falcon_id; | ||
| 404 | int ret; | ||
| 405 | |||
| 406 | INIT_LIST_HEAD(&imgs); | ||
| 407 | |||
| 408 | /* Load all LS blobs */ | ||
| 409 | for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) { | ||
| 410 | struct ls_ucode_img *img; | ||
| 411 | |||
| 412 | img = acr->func->ls_ucode_img_load(acr, falcon_id); | ||
| 413 | if (IS_ERR(img)) { | ||
| 414 | ret = PTR_ERR(img); | ||
| 415 | goto cleanup; | ||
| 416 | } | ||
| 417 | |||
| 418 | list_add_tail(&img->node, &imgs); | ||
| 419 | managed_count++; | ||
| 420 | } | ||
| 421 | |||
| 422 | /* | ||
| 423 | * Fill the WPR and LSF headers with the right offsets and compute | ||
| 424 | * required WPR size | ||
| 425 | */ | ||
| 426 | image_wpr_size = acr->func->ls_fill_headers(acr, &imgs); | ||
| 427 | image_wpr_size = ALIGN(image_wpr_size, WPR_ALIGNMENT); | ||
| 428 | |||
| 429 | /* Allocate GPU object that will contain the WPR region */ | ||
| 430 | ret = nvkm_gpuobj_new(subdev->device, image_wpr_size, WPR_ALIGNMENT, | ||
| 431 | false, NULL, &acr->ls_blob); | ||
| 432 | if (ret) | ||
| 433 | goto cleanup; | ||
| 434 | |||
| 435 | nvkm_debug(subdev, "%d managed LS falcons, WPR size is %d bytes\n", | ||
| 436 | managed_count, image_wpr_size); | ||
| 437 | |||
| 438 | /* If WPR address and size are not fixed, set them to fit the LS blob */ | ||
| 439 | if (wpr_size == 0) { | ||
| 440 | wpr_addr = acr->ls_blob->addr; | ||
| 441 | wpr_size = image_wpr_size; | ||
| 442 | /* | ||
| 443 | * But if the WPR region is set by the bootloader, it is illegal for | ||
| 444 | * the LS blob to be larger than this region. | ||
| 445 | */ | ||
| 446 | } else if (image_wpr_size > wpr_size) { | ||
| 447 | nvkm_error(subdev, "WPR region too small for FW blob!\n"); | ||
| 448 | nvkm_error(subdev, "required: %dB\n", image_wpr_size); | ||
| 449 | nvkm_error(subdev, "available: %dB\n", wpr_size); | ||
| 450 | ret = -ENOSPC; | ||
| 451 | goto cleanup; | ||
| 452 | } | ||
| 453 | |||
| 454 | /* Write LS blob */ | ||
| 455 | ret = acr->func->ls_write_wpr(acr, &imgs, acr->ls_blob, wpr_addr); | ||
| 456 | if (ret) | ||
| 457 | nvkm_gpuobj_del(&acr->ls_blob); | ||
| 458 | |||
| 459 | cleanup: | ||
| 460 | list_for_each_entry_safe(img, t, &imgs, node) { | ||
| 461 | kfree(img->ucode_data); | ||
| 462 | kfree(img->sig); | ||
| 463 | kfree(img); | ||
| 464 | } | ||
| 465 | |||
| 466 | return ret; | ||
| 467 | } | ||
| 468 | |||
| 469 | |||
| 470 | |||
| 471 | |||
| 472 | /** | ||
| 473 | * acr_r352_hsf_patch_signature() - patch HS blob with correct signature | ||
| 474 | */ | ||
| 475 | static void | ||
| 476 | acr_r352_hsf_patch_signature(struct nvkm_secboot *sb, void *acr_image) | ||
| 477 | { | ||
| 478 | struct fw_bin_header *hsbin_hdr = acr_image; | ||
| 479 | struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset; | ||
| 480 | void *hs_data = acr_image + hsbin_hdr->data_offset; | ||
| 481 | void *sig; | ||
| 482 | u32 sig_size; | ||
| 483 | |||
| 484 | /* Falcon in debug or production mode? */ | ||
| 485 | if (sb->boot_falcon->debug) { | ||
| 486 | sig = acr_image + fw_hdr->sig_dbg_offset; | ||
| 487 | sig_size = fw_hdr->sig_dbg_size; | ||
| 488 | } else { | ||
| 489 | sig = acr_image + fw_hdr->sig_prod_offset; | ||
| 490 | sig_size = fw_hdr->sig_prod_size; | ||
| 491 | } | ||
| 492 | |||
| 493 | /* Patch signature */ | ||
| 494 | memcpy(hs_data + fw_hdr->patch_loc, sig + fw_hdr->patch_sig, sig_size); | ||
| 495 | } | ||
| 496 | |||
| 497 | static void | ||
| 498 | acr_r352_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb, | ||
| 499 | struct hsflcn_acr_desc *desc) | ||
| 500 | { | ||
| 501 | struct nvkm_gpuobj *ls_blob = acr->ls_blob; | ||
| 502 | |||
| 503 | /* WPR region information if WPR is not fixed */ | ||
| 504 | if (sb->wpr_size == 0) { | ||
| 505 | u32 wpr_start = ls_blob->addr; | ||
| 506 | u32 wpr_end = wpr_start + ls_blob->size; | ||
| 507 | |||
| 508 | desc->wpr_region_id = 1; | ||
| 509 | desc->regions.no_regions = 2; | ||
| 510 | desc->regions.region_props[0].start_addr = wpr_start >> 8; | ||
| 511 | desc->regions.region_props[0].end_addr = wpr_end >> 8; | ||
| 512 | desc->regions.region_props[0].region_id = 1; | ||
| 513 | desc->regions.region_props[0].read_mask = 0xf; | ||
| 514 | desc->regions.region_props[0].write_mask = 0xc; | ||
| 515 | desc->regions.region_props[0].client_mask = 0x2; | ||
| 516 | } else { | ||
| 517 | desc->ucode_blob_base = ls_blob->addr; | ||
| 518 | desc->ucode_blob_size = ls_blob->size; | ||
| 519 | } | ||
| 520 | } | ||
| 521 | |||
| 522 | static void | ||
| 523 | acr_r352_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc, | ||
| 524 | u64 offset) | ||
| 525 | { | ||
| 526 | struct acr_r352_flcn_bl_desc *bl_desc = _bl_desc; | ||
| 527 | u64 addr_code, addr_data; | ||
| 528 | |||
| 529 | addr_code = offset >> 8; | ||
| 530 | addr_data = (offset + hdr->data_dma_base) >> 8; | ||
| 531 | |||
| 532 | bl_desc->ctx_dma = FALCON_DMAIDX_VIRT; | ||
| 533 | bl_desc->code_dma_base = lower_32_bits(addr_code); | ||
| 534 | bl_desc->non_sec_code_off = hdr->non_sec_code_off; | ||
| 535 | bl_desc->non_sec_code_size = hdr->non_sec_code_size; | ||
| 536 | bl_desc->sec_code_off = hdr->app[0].sec_code_off; | ||
| 537 | bl_desc->sec_code_size = hdr->app[0].sec_code_size; | ||
| 538 | bl_desc->code_entry_point = 0; | ||
| 539 | bl_desc->data_dma_base = lower_32_bits(addr_data); | ||
| 540 | bl_desc->data_size = hdr->data_size; | ||
| 541 | } | ||
| 542 | |||
| 543 | /** | ||
| 544 | * acr_r352_prepare_hs_blob - load and prepare a HS blob and BL descriptor | ||
| 545 | * | ||
| 546 | * @sb: secure boot instance to prepare for | ||
| 547 | * @fw: name of the HS firmware to load | ||
| 548 | * @blob: pointer to gpuobj that will be allocated to receive the HS FW payload | ||
| 549 | * @load_header: pointer to the HS load header to fill for this firmware | ||
| 550 | * @patch: whether we should patch the HS descriptor (only for HS loaders) | ||
| 551 | */ | ||
| 552 | static int | ||
| 553 | acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb, | ||
| 554 | const char *fw, struct nvkm_gpuobj **blob, | ||
| 555 | struct hsf_load_header *load_header, bool patch) | ||
| 556 | { | ||
| 557 | struct nvkm_subdev *subdev = &sb->subdev; | ||
| 558 | void *acr_image; | ||
| 559 | struct fw_bin_header *hsbin_hdr; | ||
| 560 | struct hsf_fw_header *fw_hdr; | ||
| 561 | struct hsf_load_header *load_hdr; | ||
| 562 | void *acr_data; | ||
| 563 | int ret; | ||
| 564 | |||
| 565 | acr_image = nvkm_acr_load_firmware(subdev, fw, 0); | ||
| 566 | if (IS_ERR(acr_image)) | ||
| 567 | return PTR_ERR(acr_image); | ||
| 568 | |||
| 569 | hsbin_hdr = acr_image; | ||
| 570 | fw_hdr = acr_image + hsbin_hdr->header_offset; | ||
| 571 | load_hdr = acr_image + fw_hdr->hdr_offset; | ||
| 572 | acr_data = acr_image + hsbin_hdr->data_offset; | ||
| 573 | |||
| 574 | /* Patch signature */ | ||
| 575 | acr_r352_hsf_patch_signature(sb, acr_image); | ||
| 576 | |||
| 577 | /* Patch descriptor with WPR information? */ | ||
| 578 | if (patch) { | ||
| 579 | struct hsflcn_acr_desc *desc; | ||
| 580 | |||
| 581 | desc = acr_data + load_hdr->data_dma_base; | ||
| 582 | acr_r352_fixup_hs_desc(acr, sb, desc); | ||
| 583 | } | ||
| 584 | |||
| 585 | if (load_hdr->num_apps > ACR_R352_MAX_APPS) { | ||
| 586 | nvkm_error(subdev, "more apps (%d) than supported (%d)!", | ||
| 587 | load_hdr->num_apps, ACR_R352_MAX_APPS); | ||
| 588 | ret = -EINVAL; | ||
| 589 | goto cleanup; | ||
| 590 | } | ||
| 591 | memcpy(load_header, load_hdr, sizeof(*load_header) + | ||
| 592 | (sizeof(load_hdr->app[0]) * load_hdr->num_apps)); | ||
| 593 | |||
| 594 | /* Create ACR blob and copy HS data to it */ | ||
| 595 | ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256), | ||
| 596 | 0x1000, false, NULL, blob); | ||
| 597 | if (ret) | ||
| 598 | goto cleanup; | ||
| 599 | |||
| 600 | nvkm_kmap(*blob); | ||
| 601 | nvkm_gpuobj_memcpy_to(*blob, 0, acr_data, hsbin_hdr->data_size); | ||
| 602 | nvkm_done(*blob); | ||
| 603 | |||
| 604 | cleanup: | ||
| 605 | kfree(acr_image); | ||
| 606 | |||
| 607 | return ret; | ||
| 608 | } | ||
| 609 | |||
| 610 | static int | ||
| 611 | acr_r352_prepare_hsbl_blob(struct acr_r352 *acr) | ||
| 612 | { | ||
| 613 | const struct nvkm_subdev *subdev = acr->base.subdev; | ||
| 614 | struct fw_bin_header *hdr; | ||
| 615 | struct fw_bl_desc *hsbl_desc; | ||
| 616 | |||
| 617 | acr->hsbl_blob = nvkm_acr_load_firmware(subdev, "acr/bl", 0); | ||
| 618 | if (IS_ERR(acr->hsbl_blob)) { | ||
| 619 | int ret = PTR_ERR(acr->hsbl_blob); | ||
| 620 | |||
| 621 | acr->hsbl_blob = NULL; | ||
| 622 | return ret; | ||
| 623 | } | ||
| 624 | |||
| 625 | hdr = acr->hsbl_blob; | ||
| 626 | hsbl_desc = acr->hsbl_blob + hdr->header_offset; | ||
| 627 | |||
| 628 | /* virtual start address for boot vector */ | ||
| 629 | acr->base.start_address = hsbl_desc->start_tag << 8; | ||
| 630 | |||
| 631 | return 0; | ||
| 632 | } | ||
| 633 | |||
| 634 | /** | ||
| 635 | * acr_r352_load_blobs - load blobs common to all ACR V1 versions. | ||
| 636 | * | ||
| 637 | * This includes the LS blob, HS ucode loading blob, and HS bootloader. | ||
| 638 | * | ||
| 639 | * The HS ucode unload blob is only used on dGPU if the WPR region is variable. | ||
| 640 | */ | ||
| 641 | int | ||
| 642 | acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb) | ||
| 643 | { | ||
| 644 | int ret; | ||
| 645 | |||
| 646 | /* Firmware already loaded? */ | ||
| 647 | if (acr->firmware_ok) | ||
| 648 | return 0; | ||
| 649 | |||
| 650 | /* Load and prepare the managed falcons' firmware */ | ||
| 651 | ret = acr_r352_prepare_ls_blob(acr, sb->wpr_addr, sb->wpr_size); | ||
| 652 | if (ret) | ||
| 653 | return ret; | ||
| 654 | |||
| 655 | /* Load the HS firmware that will load the LS firmwares */ | ||
| 656 | if (!acr->load_blob) { | ||
| 657 | ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_load", | ||
| 658 | &acr->load_blob, | ||
| 659 | &acr->load_bl_header, true); | ||
| 660 | if (ret) | ||
| 661 | return ret; | ||
| 662 | } | ||
| 663 | |||
| 664 | /* If the ACR region is dynamically programmed, we need an unload FW */ | ||
| 665 | if (sb->wpr_size == 0) { | ||
| 666 | ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_unload", | ||
| 667 | &acr->unload_blob, | ||
| 668 | &acr->unload_bl_header, false); | ||
| 669 | if (ret) | ||
| 670 | return ret; | ||
| 671 | } | ||
| 672 | |||
| 673 | /* Load the HS firmware bootloader */ | ||
| 674 | if (!acr->hsbl_blob) { | ||
| 675 | ret = acr_r352_prepare_hsbl_blob(acr); | ||
| 676 | if (ret) | ||
| 677 | return ret; | ||
| 678 | } | ||
| 679 | |||
| 680 | acr->firmware_ok = true; | ||
| 681 | nvkm_debug(&sb->subdev, "LS blob successfully created\n"); | ||
| 682 | |||
| 683 | return 0; | ||
| 684 | } | ||
| 685 | |||
| 686 | /** | ||
| 687 | * acr_r352_load() - prepare HS falcon to run the specified blob, mapped | ||
| 688 | * at GPU address offset. | ||
| 689 | */ | ||
| 690 | static int | ||
| 691 | acr_r352_load(struct nvkm_acr *_acr, struct nvkm_secboot *sb, | ||
| 692 | struct nvkm_gpuobj *blob, u64 offset) | ||
| 693 | { | ||
| 694 | struct acr_r352 *acr = acr_r352(_acr); | ||
| 695 | struct nvkm_falcon *falcon = sb->boot_falcon; | ||
| 696 | struct fw_bin_header *hdr = acr->hsbl_blob; | ||
| 697 | struct fw_bl_desc *hsbl_desc = acr->hsbl_blob + hdr->header_offset; | ||
| 698 | void *blob_data = acr->hsbl_blob + hdr->data_offset; | ||
| 699 | void *hsbl_code = blob_data + hsbl_desc->code_off; | ||
| 700 | void *hsbl_data = blob_data + hsbl_desc->data_off; | ||
| 701 | u32 code_size = ALIGN(hsbl_desc->code_size, 256); | ||
| 702 | const struct hsf_load_header *load_hdr; | ||
| 703 | const u32 bl_desc_size = acr->func->hs_bl_desc_size; | ||
| 704 | u8 bl_desc[bl_desc_size]; | ||
| 705 | |||
| 706 | /* Find the bootloader descriptor for our blob and copy it */ | ||
| 707 | if (blob == acr->load_blob) { | ||
| 708 | load_hdr = &acr->load_bl_header; | ||
| 709 | } else if (blob == acr->unload_blob) { | ||
| 710 | load_hdr = &acr->unload_bl_header; | ||
| 711 | } else { | ||
| 712 | nvkm_error(_acr->subdev, "invalid secure boot blob!\n"); | ||
| 713 | return -EINVAL; | ||
| 714 | } | ||
| 715 | |||
| 716 | /* | ||
| 717 | * Copy HS bootloader data | ||
| 718 | */ | ||
| 719 | nvkm_falcon_load_dmem(falcon, hsbl_data, 0x0, hsbl_desc->data_size, 0); | ||
| 720 | |||
| 721 | /* Copy HS bootloader code to end of IMEM */ | ||
| 722 | nvkm_falcon_load_imem(falcon, hsbl_code, falcon->code.limit - code_size, | ||
| 723 | code_size, hsbl_desc->start_tag, 0, false); | ||
| 724 | |||
| 725 | /* Generate the BL header */ | ||
| 726 | memset(bl_desc, 0, bl_desc_size); | ||
| 727 | acr->func->generate_hs_bl_desc(load_hdr, bl_desc, offset); | ||
| 728 | |||
| 729 | /* | ||
| 730 | * Copy HS BL header where the HS descriptor expects it to be | ||
| 731 | */ | ||
| 732 | nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off, | ||
| 733 | bl_desc_size, 0); | ||
| 734 | |||
| 735 | return 0; | ||
| 736 | } | ||
| 737 | |||
| 738 | static int | ||
| 739 | acr_r352_shutdown(struct acr_r352 *acr, struct nvkm_secboot *sb) | ||
| 740 | { | ||
| 741 | int i; | ||
| 742 | |||
| 743 | /* Run the unload blob to unprotect the WPR region */ | ||
| 744 | if (acr->unload_blob && sb->wpr_set) { | ||
| 745 | int ret; | ||
| 746 | |||
| 747 | nvkm_debug(&sb->subdev, "running HS unload blob\n"); | ||
| 748 | ret = sb->func->run_blob(sb, acr->unload_blob); | ||
| 749 | if (ret) | ||
| 750 | return ret; | ||
| 751 | nvkm_debug(&sb->subdev, "HS unload blob completed\n"); | ||
| 752 | } | ||
| 753 | |||
| 754 | for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++) | ||
| 755 | acr->falcon_state[i] = NON_SECURE; | ||
| 756 | |||
| 757 | sb->wpr_set = false; | ||
| 758 | |||
| 759 | return 0; | ||
| 760 | } | ||
| 761 | |||
| 762 | static int | ||
| 763 | acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb) | ||
| 764 | { | ||
| 765 | int ret; | ||
| 766 | |||
| 767 | if (sb->wpr_set) | ||
| 768 | return 0; | ||
| 769 | |||
| 770 | /* Make sure all blobs are ready */ | ||
| 771 | ret = acr_r352_load_blobs(acr, sb); | ||
| 772 | if (ret) | ||
| 773 | return ret; | ||
| 774 | |||
| 775 | nvkm_debug(&sb->subdev, "running HS load blob\n"); | ||
| 776 | ret = sb->func->run_blob(sb, acr->load_blob); | ||
| 777 | /* clear halt interrupt */ | ||
| 778 | nvkm_falcon_clear_interrupt(sb->boot_falcon, 0x10); | ||
| 779 | if (ret) | ||
| 780 | return ret; | ||
| 781 | nvkm_debug(&sb->subdev, "HS load blob completed\n"); | ||
| 782 | |||
| 783 | sb->wpr_set = true; | ||
| 784 | |||
| 785 | return 0; | ||
| 786 | } | ||
| 787 | |||
| 788 | /* | ||
| 789 | * acr_r352_reset() - execute secure boot from the prepared state | ||
| 790 | * | ||
| 791 | * Load the HS bootloader and ask the falcon to run it. This will in turn | ||
| 792 | * load the HS firmware and run it, so once the falcon stops all the managed | ||
| 793 | * falcons should have their LS firmware loaded and be ready to run. | ||
| 794 | */ | ||
| 795 | static int | ||
| 796 | acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb, | ||
| 797 | enum nvkm_secboot_falcon falcon) | ||
| 798 | { | ||
| 799 | struct acr_r352 *acr = acr_r352(_acr); | ||
| 800 | int ret; | ||
| 801 | |||
| 802 | /* | ||
| 803 | * Dummy GM200 implementation: perform secure boot each time we are | ||
| 804 | * called on FECS. Since only FECS and GPCCS are managed and started | ||
| 805 | * together, this ought to be safe. | ||
| 806 | * | ||
| 807 | * Once we have proper PMU firmware and support, this will be changed | ||
| 808 | * to a proper call to the PMU method. | ||
| 809 | */ | ||
| 810 | if (falcon != NVKM_SECBOOT_FALCON_FECS) | ||
| 811 | goto end; | ||
| 812 | |||
| 813 | ret = acr_r352_shutdown(acr, sb); | ||
| 814 | if (ret) | ||
| 815 | return ret; | ||
| 816 | |||
| 817 | ret = acr_r352_bootstrap(acr, sb); | ||
| 818 | if (ret) | ||
| 819 | return ret; | ||
| 820 | |||
| 821 | end: | ||
| 822 | acr->falcon_state[falcon] = RESET; | ||
| 823 | return 0; | ||
| 824 | } | ||
| 825 | |||
| 826 | static int | ||
| 827 | acr_r352_start(struct nvkm_acr *_acr, struct nvkm_secboot *sb, | ||
| 828 | enum nvkm_secboot_falcon falcon) | ||
| 829 | { | ||
| 830 | struct acr_r352 *acr = acr_r352(_acr); | ||
| 831 | const struct nvkm_subdev *subdev = &sb->subdev; | ||
| 832 | int base; | ||
| 833 | |||
| 834 | switch (falcon) { | ||
| 835 | case NVKM_SECBOOT_FALCON_FECS: | ||
| 836 | base = 0x409000; | ||
| 837 | break; | ||
| 838 | case NVKM_SECBOOT_FALCON_GPCCS: | ||
| 839 | base = 0x41a000; | ||
| 840 | break; | ||
| 841 | default: | ||
| 842 | nvkm_error(subdev, "cannot start unhandled falcon!\n"); | ||
| 843 | return -EINVAL; | ||
| 844 | } | ||
| 845 | |||
| 846 | nvkm_wr32(subdev->device, base + 0x130, 0x00000002); | ||
| 847 | acr->falcon_state[falcon] = RUNNING; | ||
| 848 | |||
| 849 | return 0; | ||
| 850 | } | ||
| 851 | |||
| 852 | static int | ||
| 853 | acr_r352_fini(struct nvkm_acr *_acr, struct nvkm_secboot *sb, bool suspend) | ||
| 854 | { | ||
| 855 | struct acr_r352 *acr = acr_r352(_acr); | ||
| 856 | |||
| 857 | return acr_r352_shutdown(acr, sb); | ||
| 858 | } | ||
| 859 | |||
| 860 | static void | ||
| 861 | acr_r352_dtor(struct nvkm_acr *_acr) | ||
| 862 | { | ||
| 863 | struct acr_r352 *acr = acr_r352(_acr); | ||
| 864 | |||
| 865 | nvkm_gpuobj_del(&acr->unload_blob); | ||
| 866 | |||
| 867 | kfree(acr->hsbl_blob); | ||
| 868 | nvkm_gpuobj_del(&acr->load_blob); | ||
| 869 | nvkm_gpuobj_del(&acr->ls_blob); | ||
| 870 | |||
| 871 | kfree(acr); | ||
| 872 | } | ||
| 873 | |||
| 874 | const struct acr_r352_ls_func | ||
| 875 | acr_r352_ls_fecs_func = { | ||
| 876 | .load = acr_ls_ucode_load_fecs, | ||
| 877 | .generate_bl_desc = acr_r352_generate_flcn_bl_desc, | ||
| 878 | .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc), | ||
| 879 | }; | ||
| 880 | |||
| 881 | const struct acr_r352_ls_func | ||
| 882 | acr_r352_ls_gpccs_func = { | ||
| 883 | .load = acr_ls_ucode_load_gpccs, | ||
| 884 | .generate_bl_desc = acr_r352_generate_flcn_bl_desc, | ||
| 885 | .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc), | ||
| 886 | /* GPCCS will be loaded using PRI */ | ||
| 887 | .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD, | ||
| 888 | }; | ||
| 889 | |||
| 890 | const struct acr_r352_func | ||
| 891 | acr_r352_func = { | ||
| 892 | .generate_hs_bl_desc = acr_r352_generate_hs_bl_desc, | ||
| 893 | .hs_bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc), | ||
| 894 | .ls_ucode_img_load = acr_r352_ls_ucode_img_load, | ||
| 895 | .ls_fill_headers = acr_r352_ls_fill_headers, | ||
| 896 | .ls_write_wpr = acr_r352_ls_write_wpr, | ||
| 897 | .ls_func = { | ||
| 898 | [NVKM_SECBOOT_FALCON_FECS] = &acr_r352_ls_fecs_func, | ||
| 899 | [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r352_ls_gpccs_func, | ||
| 900 | }, | ||
| 901 | }; | ||
| 902 | |||
| 903 | static const struct nvkm_acr_func | ||
| 904 | acr_r352_base_func = { | ||
| 905 | .dtor = acr_r352_dtor, | ||
| 906 | .fini = acr_r352_fini, | ||
| 907 | .load = acr_r352_load, | ||
| 908 | .reset = acr_r352_reset, | ||
| 909 | .start = acr_r352_start, | ||
| 910 | }; | ||
| 911 | |||
| 912 | struct nvkm_acr * | ||
| 913 | acr_r352_new_(const struct acr_r352_func *func, | ||
| 914 | enum nvkm_secboot_falcon boot_falcon, | ||
| 915 | unsigned long managed_falcons) | ||
| 916 | { | ||
| 917 | struct acr_r352 *acr; | ||
| 918 | |||
| 919 | acr = kzalloc(sizeof(*acr), GFP_KERNEL); | ||
| 920 | if (!acr) | ||
| 921 | return ERR_PTR(-ENOMEM); | ||
| 922 | |||
| 923 | acr->base.boot_falcon = boot_falcon; | ||
| 924 | acr->base.managed_falcons = managed_falcons; | ||
| 925 | acr->base.func = &acr_r352_base_func; | ||
| 926 | acr->func = func; | ||
| 927 | |||
| 928 | return &acr->base; | ||
| 929 | } | ||
| 930 | |||
| 931 | struct nvkm_acr * | ||
| 932 | acr_r352_new(unsigned long managed_falcons) | ||
| 933 | { | ||
| 934 | return acr_r352_new_(&acr_r352_func, NVKM_SECBOOT_FALCON_PMU, | ||
| 935 | managed_falcons); | ||
| 936 | } | ||
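For context, a chip-specific secboot implementation would instantiate this ACR roughly as follows. This is a minimal sketch, not taken from the patch itself; the function name is hypothetical, and it only assumes the ERR_PTR() convention that acr_r352_new_() visibly follows above.

```c
#include <linux/err.h>

/* Hypothetical caller sketch: build an ACR managing the FECS and GPCCS
 * falcons. acr_r352_new() returns an ERR_PTR() on allocation failure,
 * so the result must be checked with IS_ERR(), not against NULL. */
static int
example_secboot_acr_init(struct nvkm_acr **pacr)
{
	struct nvkm_acr *acr;

	acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
			   BIT(NVKM_SECBOOT_FALCON_GPCCS));
	if (IS_ERR(acr))
		return PTR_ERR(acr);

	*pacr = acr;
	return 0;
}
```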
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h new file mode 100644 index 000000000000..ad5923b0fd3c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h | |||
| @@ -0,0 +1,250 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 20 | * DEALINGS IN THE SOFTWARE. | ||
| 21 | */ | ||
| 22 | #ifndef __NVKM_SECBOOT_ACR_R352_H__ | ||
| 23 | #define __NVKM_SECBOOT_ACR_R352_H__ | ||
| 24 | |||
| 25 | #include "acr.h" | ||
| 26 | #include "ls_ucode.h" | ||
| 27 | |||
| 28 | struct ls_ucode_img; | ||
| 29 | |||
| 30 | #define ACR_R352_MAX_APPS 8 | ||
| 31 | |||
| 32 | /* | ||
| 33 | * | ||
| 34 | * LS blob structures | ||
| 35 | * | ||
| 36 | */ | ||
| 37 | |||
| 38 | /** | ||
| 39 | * struct acr_r352_lsf_lsb_header - LS firmware header | ||
| 40 | * @signature: signature to verify the firmware against | ||
| 41 | * @ucode_off: offset of the ucode blob in the WPR region. The ucode | ||
| 42 | * blob contains the bootloader, code and data of the | ||
| 43 | * LS falcon | ||
| 44 | * @ucode_size: size of the ucode blob, including bootloader | ||
| 45 | * @data_size: size of the ucode blob data | ||
| 46 | * @bl_code_size: size of the bootloader code | ||
| 47 | * @bl_imem_off: offset in imem of the bootloader | ||
| 48 | * @bl_data_off: offset of the bootloader data in WPR region | ||
| 49 | * @bl_data_size: size of the bootloader data | ||
| 50 | * @app_code_off: offset of the app code relative to ucode_off | ||
| 51 | * @app_code_size: size of the app code | ||
| 52 | * @app_data_off: offset of the app data relative to ucode_off | ||
| 53 | * @app_data_size: size of the app data | ||
| 54 | * @flags: flags for the secure bootloader | ||
| 55 | * | ||
| 56 | * This structure is written into the WPR region for each managed falcon. Each | ||
| 57 | * instance is referenced by the lsb_offset member of the corresponding | ||
| 58 | * lsf_wpr_header. | ||
| 59 | */ | ||
| 60 | struct acr_r352_lsf_lsb_header { | ||
| 61 | /** | ||
| 62 | * LS falcon signatures | ||
| 63 | * @prd_keys: signature to use in production mode | ||
| 64 | * @dbg_keys: signature to use in debug mode | ||
| 65 | * @b_prd_present: whether the production key is present | ||
| 66 | * @b_dbg_present: whether the debug key is present | ||
| 67 | * @falcon_id: ID of the falcon the ucode applies to | ||
| 68 | */ | ||
| 69 | struct { | ||
| 70 | u8 prd_keys[2][16]; | ||
| 71 | u8 dbg_keys[2][16]; | ||
| 72 | u32 b_prd_present; | ||
| 73 | u32 b_dbg_present; | ||
| 74 | u32 falcon_id; | ||
| 75 | } signature; | ||
| 76 | u32 ucode_off; | ||
| 77 | u32 ucode_size; | ||
| 78 | u32 data_size; | ||
| 79 | u32 bl_code_size; | ||
| 80 | u32 bl_imem_off; | ||
| 81 | u32 bl_data_off; | ||
| 82 | u32 bl_data_size; | ||
| 83 | u32 app_code_off; | ||
| 84 | u32 app_code_size; | ||
| 85 | u32 app_data_off; | ||
| 86 | u32 app_data_size; | ||
| 87 | u32 flags; | ||
| 88 | #define LSF_FLAG_LOAD_CODE_AT_0 1 | ||
| 89 | #define LSF_FLAG_DMACTL_REQ_CTX 4 | ||
| 90 | #define LSF_FLAG_FORCE_PRIV_LOAD 8 | ||
| 91 | }; | ||
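To make the offset chain documented above concrete, here is an illustrative helper (not part of the patch; the name is hypothetical) that resolves the app code to an absolute address, assuming wpr_addr is the base address of the WPR region:

```c
/* Illustration only: ucode_off is relative to the WPR base, while
 * app_code_off is relative to the start of the ucode blob. */
static u64
example_lsb_app_code_addr(u64 wpr_addr,
			  const struct acr_r352_lsf_lsb_header *lsb)
{
	return wpr_addr + lsb->ucode_off + lsb->app_code_off;
}
```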
| 92 | |||
| 93 | /** | ||
| 94 | * struct acr_r352_lsf_wpr_header - LS blob WPR Header | ||
| 95 | * @falcon_id: LS falcon ID | ||
| 96 | * @lsb_offset: offset of the lsb_lsf_header in the WPR region | ||
| 97 | * @bootstrap_owner: secure falcon responsible for bootstrapping the LS falcon | ||
| 98 | * @lazy_bootstrap: skip bootstrapping by ACR | ||
| 99 | * @status: bootstrapping status | ||
| 100 | * | ||
| 101 | * An array of these is written at the beginning of the WPR region, one for | ||
| 102 | * each managed falcon. The array is terminated by an instance whose falcon_id | ||
| 103 | * is LSF_FALCON_ID_INVALID. | ||
| 104 | */ | ||
| 105 | struct acr_r352_lsf_wpr_header { | ||
| 106 | u32 falcon_id; | ||
| 107 | u32 lsb_offset; | ||
| 108 | u32 bootstrap_owner; | ||
| 109 | u32 lazy_bootstrap; | ||
| 110 | u32 status; | ||
| 111 | #define LSF_IMAGE_STATUS_NONE 0 | ||
| 112 | #define LSF_IMAGE_STATUS_COPY 1 | ||
| 113 | #define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2 | ||
| 114 | #define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3 | ||
| 115 | #define LSF_IMAGE_STATUS_VALIDATION_DONE 4 | ||
| 116 | #define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5 | ||
| 117 | #define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6 | ||
| 118 | }; | ||
| 119 | |||
| 120 | /** | ||
| 121 | * struct ls_ucode_img_r352 - ucode image augmented with r352 headers | ||
| 122 | */ | ||
| 123 | struct ls_ucode_img_r352 { | ||
| 124 | struct ls_ucode_img base; | ||
| 125 | |||
| 126 | struct acr_r352_lsf_wpr_header wpr_header; | ||
| 127 | struct acr_r352_lsf_lsb_header lsb_header; | ||
| 128 | }; | ||
| 129 | #define ls_ucode_img_r352(i) container_of(i, struct ls_ucode_img_r352, base) | ||
| 130 | |||
| 131 | |||
| 132 | /* | ||
| 133 | * HS blob structures | ||
| 134 | */ | ||
| 135 | |||
| 136 | struct hsf_load_header_app { | ||
| 137 | u32 sec_code_off; | ||
| 138 | u32 sec_code_size; | ||
| 139 | }; | ||
| 140 | |||
| 141 | /** | ||
| 142 | * struct hsf_load_header - HS firmware load header | ||
| 143 | */ | ||
| 144 | struct hsf_load_header { | ||
| 145 | u32 non_sec_code_off; | ||
| 146 | u32 non_sec_code_size; | ||
| 147 | u32 data_dma_base; | ||
| 148 | u32 data_size; | ||
| 149 | u32 num_apps; | ||
| 150 | struct hsf_load_header_app app[0]; | ||
| 151 | }; | ||
| 152 | |||
| 153 | /** | ||
| 154 | * struct acr_r352_ls_func - manages a single LS firmware | ||
| 155 | * | ||
| 156 | * @load: load the external firmware into a ls_ucode_img | ||
| 157 | * @generate_bl_desc: function called on a block of bl_desc_size to generate the | ||
| 158 | * proper bootloader descriptor for this LS firmware | ||
| 159 | * @bl_desc_size: size of the bootloader descriptor | ||
| 160 | * @lhdr_flags: LS flags | ||
| 161 | */ | ||
| 162 | struct acr_r352_ls_func { | ||
| 163 | int (*load)(const struct nvkm_subdev *, struct ls_ucode_img *); | ||
| 164 | void (*generate_bl_desc)(const struct nvkm_acr *, | ||
| 165 | const struct ls_ucode_img *, u64, void *); | ||
| 166 | u32 bl_desc_size; | ||
| 167 | u32 lhdr_flags; | ||
| 168 | }; | ||
| 169 | |||
| 170 | struct acr_r352; | ||
| 171 | |||
| 172 | /** | ||
| 173 | * struct acr_r352_func - manages nuances between ACR versions | ||
| 174 | * | ||
| 175 | * @generate_hs_bl_desc: function called on a block of bl_desc_size to generate | ||
| 176 | * the proper HS bootloader descriptor | ||
| 177 | * @hs_bl_desc_size: size of the HS bootloader descriptor | ||
| 178 | */ | ||
| 179 | struct acr_r352_func { | ||
| 180 | void (*generate_hs_bl_desc)(const struct hsf_load_header *, void *, | ||
| 181 | u64); | ||
| 182 | u32 hs_bl_desc_size; | ||
| 183 | |||
| 184 | struct ls_ucode_img *(*ls_ucode_img_load)(const struct acr_r352 *, | ||
| 185 | enum nvkm_secboot_falcon); | ||
| 186 | int (*ls_fill_headers)(struct acr_r352 *, struct list_head *); | ||
| 187 | int (*ls_write_wpr)(struct acr_r352 *, struct list_head *, | ||
| 188 | struct nvkm_gpuobj *, u32); | ||
| 189 | |||
| 190 | const struct acr_r352_ls_func *ls_func[NVKM_SECBOOT_FALCON_END]; | ||
| 191 | }; | ||
| 192 | |||
| 193 | /** | ||
| 194 | * struct acr_r352 - ACR data for driver release 352 (and beyond) | ||
| 195 | */ | ||
| 196 | struct acr_r352 { | ||
| 197 | struct nvkm_acr base; | ||
| 198 | const struct acr_r352_func *func; | ||
| 199 | |||
| 200 | /* | ||
| 201 | * HS FW - lock WPR region (dGPU only) and load LS FWs. | ||
| 202 | * On Tegra, the HS FW copies the LS blob into the fixed WPR instead. | ||
| 203 | */ | ||
| 204 | struct nvkm_gpuobj *load_blob; | ||
| 205 | struct { | ||
| 206 | struct hsf_load_header load_bl_header; | ||
| 207 | struct hsf_load_header_app __load_apps[ACR_R352_MAX_APPS]; | ||
| 208 | }; | ||
| 209 | |||
| 210 | /* HS FW - unlock WPR region (dGPU only) */ | ||
| 211 | struct nvkm_gpuobj *unload_blob; | ||
| 212 | struct { | ||
| 213 | struct hsf_load_header unload_bl_header; | ||
| 214 | struct hsf_load_header_app __unload_apps[ACR_R352_MAX_APPS]; | ||
| 215 | }; | ||
| 216 | |||
| 217 | /* HS bootloader */ | ||
| 218 | void *hsbl_blob; | ||
| 219 | |||
| 220 | /* LS FWs, to be loaded by the HS ACR */ | ||
| 221 | struct nvkm_gpuobj *ls_blob; | ||
| 222 | |||
| 223 | /* Firmware already loaded? */ | ||
| 224 | bool firmware_ok; | ||
| 225 | |||
| 226 | /* Falcons to lazy-bootstrap */ | ||
| 227 | u32 lazy_bootstrap; | ||
| 228 | |||
| 229 | /* To keep track of the state of all managed falcons */ | ||
| 230 | enum { | ||
| 231 | /* In non-secure state, no firmware loaded, no privileges */ | ||
| 232 | NON_SECURE = 0, | ||
| 233 | /* In low-secure mode and ready to be started */ | ||
| 234 | RESET, | ||
| 235 | /* In low-secure mode and running */ | ||
| 236 | RUNNING, | ||
| 237 | } falcon_state[NVKM_SECBOOT_FALCON_END]; | ||
| 238 | }; | ||
| 239 | #define acr_r352(acr) container_of(acr, struct acr_r352, base) | ||
| 240 | |||
| 241 | struct nvkm_acr *acr_r352_new_(const struct acr_r352_func *, | ||
| 242 | enum nvkm_secboot_falcon, unsigned long); | ||
| 243 | |||
| 244 | struct ls_ucode_img *acr_r352_ls_ucode_img_load(const struct acr_r352 *, | ||
| 245 | enum nvkm_secboot_falcon); | ||
| 246 | int acr_r352_ls_fill_headers(struct acr_r352 *, struct list_head *); | ||
| 247 | int acr_r352_ls_write_wpr(struct acr_r352 *, struct list_head *, | ||
| 248 | struct nvkm_gpuobj *, u32); | ||
| 249 | |||
| 250 | #endif | ||
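As a usage sketch, walking the WPR header array described in this header would look like the following. The terminator value is an assumption: LSF_FALCON_ID_INVALID is not defined in this file, and 0xffffffff is only the conventional invalid falcon ID.

```c
/* Sketch: iterate the acr_r352_lsf_wpr_header array at the start of
 * the WPR region until the invalid-falcon terminator is reached.
 * LSF_FALCON_ID_INVALID is assumed here, not defined by this header. */
#define LSF_FALCON_ID_INVALID 0xffffffff

static void
example_walk_wpr_headers(struct acr_r352_lsf_wpr_header *whdr)
{
	for (; whdr->falcon_id != LSF_FALCON_ID_INVALID; whdr++) {
		/* whdr->lsb_offset locates this falcon's LSB header,
		 * relative to the WPR base. */
	}
}
```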
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c new file mode 100644 index 000000000000..f0aff1d98474 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c | |||
| @@ -0,0 +1,138 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 20 | * DEALINGS IN THE SOFTWARE. | ||
| 21 | */ | ||
| 22 | |||
| 23 | #include "acr_r352.h" | ||
| 24 | |||
| 25 | #include <engine/falcon.h> | ||
| 26 | |||
| 27 | /** | ||
| 28 | * struct acr_r361_flcn_bl_desc - DMEM bootloader descriptor | ||
| 29 | * @signature: 16B signature for secure code. 0s if no secure code | ||
| 30 | * @ctx_dma: DMA context to be used by BL while loading code/data | ||
| 31 | * @code_dma_base: 256B-aligned Physical FB Address where code is located | ||
| 32 | * (falcon's $xcbase register) | ||
| 33 | * @non_sec_code_off: offset from code_dma_base where the non-secure code is | ||
| 34 | * located. The offset must be a multiple of 256 to help perf | ||
| 35 | * @non_sec_code_size: size of the non-secure code part. | ||
| 36 | * @sec_code_off: offset from code_dma_base where the secure code is | ||
| 37 | * located. The offset must be a multiple of 256 to help perf | ||
| 38 | * @sec_code_size: size of the secure code part, i.e. the code | ||
| 39 | * located at sec_code_off. | ||
| 40 | * @code_entry_point: code entry point which will be invoked by BL after | ||
| 41 | * code is loaded. | ||
| 42 | * @data_dma_base: 256B-aligned Physical FB Address where data is located. | ||
| 43 | * (falcon's $xdbase register) | ||
| 44 | * @data_size: size of the data block. Should be a multiple of 256B | ||
| 45 | * | ||
| 46 | * Structure used by the bootloader to load the rest of the code. This has | ||
| 47 | * to be filled by host and copied into DMEM at offset provided in the | ||
| 48 | * hsflcn_bl_desc.bl_desc_dmem_load_off. | ||
| 49 | */ | ||
| 50 | struct acr_r361_flcn_bl_desc { | ||
| 51 | u32 reserved[4]; | ||
| 52 | u32 signature[4]; | ||
| 53 | u32 ctx_dma; | ||
| 54 | struct flcn_u64 code_dma_base; | ||
| 55 | u32 non_sec_code_off; | ||
| 56 | u32 non_sec_code_size; | ||
| 57 | u32 sec_code_off; | ||
| 58 | u32 sec_code_size; | ||
| 59 | u32 code_entry_point; | ||
| 60 | struct flcn_u64 data_dma_base; | ||
| 61 | u32 data_size; | ||
| 62 | }; | ||
| 63 | |||
| 64 | static void | ||
| 65 | acr_r361_generate_flcn_bl_desc(const struct nvkm_acr *acr, | ||
| 66 | const struct ls_ucode_img *_img, u64 wpr_addr, | ||
| 67 | void *_desc) | ||
| 68 | { | ||
| 69 | struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img); | ||
| 70 | struct acr_r361_flcn_bl_desc *desc = _desc; | ||
| 71 | const struct ls_ucode_img_desc *pdesc = &img->base.ucode_desc; | ||
| 72 | u64 base, addr_code, addr_data; | ||
| 73 | |||
| 74 | base = wpr_addr + img->lsb_header.ucode_off + pdesc->app_start_offset; | ||
| 75 | addr_code = base + pdesc->app_resident_code_offset; | ||
| 76 | addr_data = base + pdesc->app_resident_data_offset; | ||
| 77 | |||
| 78 | desc->ctx_dma = FALCON_DMAIDX_UCODE; | ||
| 79 | desc->code_dma_base = u64_to_flcn64(addr_code); | ||
| 80 | desc->non_sec_code_off = pdesc->app_resident_code_offset; | ||
| 81 | desc->non_sec_code_size = pdesc->app_resident_code_size; | ||
| 82 | desc->code_entry_point = pdesc->app_imem_entry; | ||
| 83 | desc->data_dma_base = u64_to_flcn64(addr_data); | ||
| 84 | desc->data_size = pdesc->app_resident_data_size; | ||
| 85 | } | ||
| 86 | |||
| 87 | static void | ||
| 88 | acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc, | ||
| 89 | u64 offset) | ||
| 90 | { | ||
| 91 | struct acr_r361_flcn_bl_desc *bl_desc = _bl_desc; | ||
| 92 | |||
| 93 | bl_desc->ctx_dma = FALCON_DMAIDX_VIRT; | ||
| 94 | bl_desc->code_dma_base = u64_to_flcn64(offset); | ||
| 95 | bl_desc->non_sec_code_off = hdr->non_sec_code_off; | ||
| 96 | bl_desc->non_sec_code_size = hdr->non_sec_code_size; | ||
| 97 | bl_desc->sec_code_off = hdr->app[0].sec_code_off; | ||
| 98 | bl_desc->sec_code_size = hdr->app[0].sec_code_size; | ||
| 99 | bl_desc->code_entry_point = 0; | ||
| 100 | bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base); | ||
| 101 | bl_desc->data_size = hdr->data_size; | ||
| 102 | } | ||
| 103 | |||
| 104 | const struct acr_r352_ls_func | ||
| 105 | acr_r361_ls_fecs_func = { | ||
| 106 | .load = acr_ls_ucode_load_fecs, | ||
| 107 | .generate_bl_desc = acr_r361_generate_flcn_bl_desc, | ||
| 108 | .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), | ||
| 109 | }; | ||
| 110 | |||
| 111 | const struct acr_r352_ls_func | ||
| 112 | acr_r361_ls_gpccs_func = { | ||
| 113 | .load = acr_ls_ucode_load_gpccs, | ||
| 114 | .generate_bl_desc = acr_r361_generate_flcn_bl_desc, | ||
| 115 | .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), | ||
| 116 | /* GPCCS will be loaded using PRI */ | ||
| 117 | .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD, | ||
| 118 | }; | ||
| 119 | |||
| 120 | const struct acr_r352_func | ||
| 121 | acr_r361_func = { | ||
| 122 | .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc, | ||
| 123 | .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), | ||
| 124 | .ls_ucode_img_load = acr_r352_ls_ucode_img_load, | ||
| 125 | .ls_fill_headers = acr_r352_ls_fill_headers, | ||
| 126 | .ls_write_wpr = acr_r352_ls_write_wpr, | ||
| 127 | .ls_func = { | ||
| 128 | [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func, | ||
| 129 | [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func, | ||
| 130 | }, | ||
| 131 | }; | ||
| 132 | |||
| 133 | struct nvkm_acr * | ||
| 134 | acr_r361_new(unsigned long managed_falcons) | ||
| 135 | { | ||
| 136 | return acr_r352_new_(&acr_r361_func, NVKM_SECBOOT_FALCON_PMU, | ||
| 137 | managed_falcons); | ||
| 138 | } | ||
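The descriptor above stores DMA bases as struct flcn_u64 values produced by u64_to_flcn64(); both are defined elsewhere in the driver, outside this hunk. Their presumed shape, shown here only to clarify the lo/hi split (an assumption, since the defining header is not part of this diff):

```c
/* Presumed definitions, for illustration: a 64-bit falcon DMA address
 * split into the 32-bit halves the bootloader consumes.
 * lower_32_bits()/upper_32_bits() come from <linux/kernel.h>. */
struct flcn_u64 {
	u32 lo;
	u32 hi;
};

static inline struct flcn_u64
u64_to_flcn64(u64 u)
{
	struct flcn_u64 ret;

	ret.lo = lower_32_bits(u);
	ret.hi = upper_32_bits(u);
	return ret;
}
```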
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c index 314be2192b7d..27c9dfffb9a6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c | |||
| @@ -19,184 +19,108 @@ | |||
| 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
| 20 | * DEALINGS IN THE SOFTWARE. | 20 | * DEALINGS IN THE SOFTWARE. |
| 21 | */ | 21 | */ |
| 22 | |||
| 23 | /* | ||
| 24 | * Secure boot is the process by which NVIDIA-signed firmware is loaded into | ||
| 25 | * some of the falcons of a GPU. For production devices this is the only way | ||
| 26 | * for the firmware to access useful (but sensitive) registers. | ||
| 27 | * | ||
| 28 | * A Falcon microprocessor supporting advanced security modes can run in one of | ||
| 29 | * three modes: | ||
| 30 | * | ||
| 31 | * - Non-secure (NS). In this mode, functionality is similar to Falcon | ||
| 32 | * architectures before security modes were introduced (pre-Maxwell), but | ||
| 33 | * capability is restricted. In particular, certain registers may be | ||
| 34 | * inaccessible for reads and/or writes, and physical memory access may be | ||
| 35 | * disabled (on certain Falcon instances). This is the only possible mode that | ||
| 36 | * can be used if you don't have microcode cryptographically signed by NVIDIA. | ||
| 37 | * | ||
| 38 | * - Heavy Secure (HS). In this mode, the microprocessor is a black box - it's | ||
| 39 | * not possible to read or write any Falcon internal state or Falcon registers | ||
| 40 | * from outside the Falcon (for example, from the host system). The only way | ||
| 41 | * to enable this mode is by loading microcode that has been signed by NVIDIA. | ||
| 42 | * (The loading process involves tagging the IMEM block as secure, writing the | ||
| 43 | * signature into a Falcon register, and starting execution. The hardware will | ||
| 44 | * validate the signature, and if valid, grant HS privileges.) | ||
| 45 | * | ||
| 46 | * - Light Secure (LS). In this mode, the microprocessor has more privileges | ||
| 47 | * than NS but fewer than HS. Some of the microprocessor state is visible to | ||
| 48 | * host software to ease debugging. The only way to enable this mode is by HS | ||
| 49 | * microcode enabling LS mode. Some privileges available to HS mode are not | ||
| 50 | * available here. LS mode is introduced in GM20x. | ||
| 51 | * | ||
| 52 | * Secure boot consists in temporarily switching a HS-capable falcon (typically | ||
| 53 | * PMU) into HS mode in order to validate the LS firmwares of managed falcons, | ||
| 54 | * load them, and switch managed falcons into LS mode. Once secure boot | ||
| 55 | * completes, no falcon remains in HS mode. | ||
| 56 | * | ||
| 57 | * Secure boot requires a write-protected memory region (WPR) which can only be | ||
| 58 | * written by the secure falcon. On dGPU, the driver sets up the WPR region in | ||
| 59 | * video memory. On Tegra, it is set up by the bootloader and its location and | ||
| 60 | * size are written into memory controller registers. | ||
| 61 | * | ||
| 62 | * The secure boot process takes place as follows: | ||
| 63 | * | ||
| 64 | * 1) A LS blob is constructed that contains all the LS firmwares we want to | ||
| 65 | * load, along with their signatures and bootloaders. | ||
| 66 | * | ||
| 67 | * 2) A HS blob (also called ACR) is created that contains the signed HS | ||
| 68 | * firmware in charge of loading the LS firmwares into their respective | ||
| 69 | * falcons. | ||
| 70 | * | ||
| 71 | * 3) The HS blob is loaded (via its own bootloader) and executed on the | ||
| 72 | * HS-capable falcon. It authenticates itself, switches the secure falcon to | ||
| 73 | * HS mode and sets up the WPR region around the LS blob (dGPU) or copies the | ||
| 74 | * LS blob into the WPR region (Tegra). | ||
| 75 | * | ||
| 76 | * 4) The LS blob is now secure from all external tampering. The HS falcon | ||
| 77 | * checks the signatures of the LS firmwares and, if valid, switches the | ||
| 78 | * managed falcons to LS mode and makes them ready to run the LS firmware. | ||
| 79 | * | ||
| 80 | * 5) The managed falcons remain in LS mode and can be started. | ||
| 81 | * | ||
| 82 | */ | ||
| 83 | |||
| 22 | #include "priv.h" | 84 | #include "priv.h" |
| 85 | #include "acr.h" | ||
| 23 | 86 | ||
| 24 | #include <subdev/mc.h> | 87 | #include <subdev/mc.h> |
| 25 | #include <subdev/timer.h> | 88 | #include <subdev/timer.h> |
| 89 | #include <subdev/pmu.h> | ||
| 26 | 90 | ||
| 27 | static const char * | 91 | const char * |
| 28 | managed_falcons_names[] = { | 92 | nvkm_secboot_falcon_name[] = { |
| 29 | [NVKM_SECBOOT_FALCON_PMU] = "PMU", | 93 | [NVKM_SECBOOT_FALCON_PMU] = "PMU", |
| 30 | [NVKM_SECBOOT_FALCON_RESERVED] = "<reserved>", | 94 | [NVKM_SECBOOT_FALCON_RESERVED] = "<reserved>", |
| 31 | [NVKM_SECBOOT_FALCON_FECS] = "FECS", | 95 | [NVKM_SECBOOT_FALCON_FECS] = "FECS", |
| 32 | [NVKM_SECBOOT_FALCON_GPCCS] = "GPCCS", | 96 | [NVKM_SECBOOT_FALCON_GPCCS] = "GPCCS", |
| 33 | [NVKM_SECBOOT_FALCON_END] = "<invalid>", | 97 | [NVKM_SECBOOT_FALCON_END] = "<invalid>", |
| 34 | }; | 98 | }; |
| 35 | |||
| 36 | /* | ||
| 37 | * Helper falcon functions | ||
| 38 | */ | ||
| 39 | |||
| 40 | static int | ||
| 41 | falcon_clear_halt_interrupt(struct nvkm_device *device, u32 base) | ||
| 42 | { | ||
| 43 | int ret; | ||
| 44 | |||
| 45 | /* clear halt interrupt */ | ||
| 46 | nvkm_mask(device, base + 0x004, 0x10, 0x10); | ||
| 47 | /* wait until halt interrupt is cleared */ | ||
| 48 | ret = nvkm_wait_msec(device, 10, base + 0x008, 0x10, 0x0); | ||
| 49 | if (ret < 0) | ||
| 50 | return ret; | ||
| 51 | |||
| 52 | return 0; | ||
| 53 | } | ||
| 54 | |||
| 55 | static int | ||
| 56 | falcon_wait_idle(struct nvkm_device *device, u32 base) | ||
| 57 | { | ||
| 58 | int ret; | ||
| 59 | |||
| 60 | ret = nvkm_wait_msec(device, 10, base + 0x04c, 0xffff, 0x0); | ||
| 61 | if (ret < 0) | ||
| 62 | return ret; | ||
| 63 | |||
| 64 | return 0; | ||
| 65 | } | ||
| 66 | |||
| 67 | static int | ||
| 68 | nvkm_secboot_falcon_enable(struct nvkm_secboot *sb) | ||
| 69 | { | ||
| 70 | struct nvkm_device *device = sb->subdev.device; | ||
| 71 | int ret; | ||
| 72 | |||
| 73 | /* enable engine */ | ||
| 74 | nvkm_mc_enable(device, sb->devidx); | ||
| 75 | ret = nvkm_wait_msec(device, 10, sb->base + 0x10c, 0x6, 0x0); | ||
| 76 | if (ret < 0) { | ||
| 77 | nvkm_error(&sb->subdev, "Falcon mem scrubbing timeout\n"); | ||
| 78 | nvkm_mc_disable(device, sb->devidx); | ||
| 79 | return ret; | ||
| 80 | } | ||
| 81 | |||
| 82 | ret = falcon_wait_idle(device, sb->base); | ||
| 83 | if (ret) | ||
| 84 | return ret; | ||
| 85 | |||
| 86 | /* enable IRQs */ | ||
| 87 | nvkm_wr32(device, sb->base + 0x010, 0xff); | ||
| 88 | nvkm_mc_intr_mask(device, sb->devidx, true); | ||
| 89 | |||
| 90 | return 0; | ||
| 91 | } | ||
| 92 | |||
| 93 | static int | ||
| 94 | nvkm_secboot_falcon_disable(struct nvkm_secboot *sb) | ||
| 95 | { | ||
| 96 | struct nvkm_device *device = sb->subdev.device; | ||
| 97 | |||
| 98 | /* disable IRQs and wait for any previous code to complete */ | ||
| 99 | nvkm_mc_intr_mask(device, sb->devidx, false); | ||
| 100 | nvkm_wr32(device, sb->base + 0x014, 0xff); | ||
| 101 | |||
| 102 | falcon_wait_idle(device, sb->base); | ||
| 103 | |||
| 104 | /* disable engine */ | ||
| 105 | nvkm_mc_disable(device, sb->devidx); | ||
| 106 | |||
| 107 | return 0; | ||
| 108 | } | ||
| 109 | |||
| 110 | int | ||
| 111 | nvkm_secboot_falcon_reset(struct nvkm_secboot *sb) | ||
| 112 | { | ||
| 113 | int ret; | ||
| 114 | |||
| 115 | ret = nvkm_secboot_falcon_disable(sb); | ||
| 116 | if (ret) | ||
| 117 | return ret; | ||
| 118 | |||
| 119 | ret = nvkm_secboot_falcon_enable(sb); | ||
| 120 | if (ret) | ||
| 121 | return ret; | ||
| 122 | |||
| 123 | return 0; | ||
| 124 | } | ||
| 125 | |||
| 126 | /** | ||
| 127 | * nvkm_secboot_falcon_run - run the falcon that will perform secure boot | ||
| 128 | * | ||
| 129 | * This function is to be called after all chip-specific preparations have | ||
| 130 | * been completed. It will start the falcon to perform secure boot, wait for | ||
| 131 | * it to halt, and report if an error occurred. | ||
| 132 | */ | ||
| 133 | int | ||
| 134 | nvkm_secboot_falcon_run(struct nvkm_secboot *sb) | ||
| 135 | { | ||
| 136 | struct nvkm_device *device = sb->subdev.device; | ||
| 137 | int ret; | ||
| 138 | |||
| 139 | /* Start falcon */ | ||
| 140 | nvkm_wr32(device, sb->base + 0x100, 0x2); | ||
| 141 | |||
| 142 | /* Wait for falcon halt */ | ||
| 143 | ret = nvkm_wait_msec(device, 100, sb->base + 0x100, 0x10, 0x10); | ||
| 144 | if (ret < 0) | ||
| 145 | return ret; | ||
| 146 | |||
| 147 | /* If mailbox register contains an error code, then ACR has failed */ | ||
| 148 | ret = nvkm_rd32(device, sb->base + 0x040); | ||
| 149 | if (ret) { | ||
| 150 | nvkm_error(&sb->subdev, "ACR boot failed, ret 0x%08x", ret); | ||
| 151 | falcon_clear_halt_interrupt(device, sb->base); | ||
| 152 | return -EINVAL; | ||
| 153 | } | ||
| 154 | |||
| 155 | return 0; | ||
| 156 | } | ||
| 157 | |||
| 158 | |||
| 159 | /** | 99 | /** |
| 160 | * nvkm_secboot_reset() - reset specified falcon | 100 | * nvkm_secboot_reset() - reset specified falcon |
| 161 | */ | 101 | */ |
| 162 | int | 102 | int |
| 163 | nvkm_secboot_reset(struct nvkm_secboot *sb, u32 falcon) | 103 | nvkm_secboot_reset(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon) |
| 164 | { | 104 | { |
| 165 | /* Unmanaged falcon? */ | 105 | /* Unmanaged falcon? */ |
| 166 | if (!(BIT(falcon) & sb->func->managed_falcons)) { | 106 | if (!(BIT(falcon) & sb->acr->managed_falcons)) { |
| 167 | nvkm_error(&sb->subdev, "cannot reset unmanaged falcon!\n"); | 107 | nvkm_error(&sb->subdev, "cannot reset unmanaged falcon!\n"); |
| 168 | return -EINVAL; | 108 | return -EINVAL; |
| 169 | } | 109 | } |
| 170 | 110 | ||
| 171 | return sb->func->reset(sb, falcon); | 111 | return sb->acr->func->reset(sb->acr, sb, falcon); |
| 172 | } | ||
| 173 | |||
| 174 | /** | ||
| 175 | * nvkm_secboot_start() - start specified falcon | ||
| 176 | */ | ||
| 177 | int | ||
| 178 | nvkm_secboot_start(struct nvkm_secboot *sb, u32 falcon) | ||
| 179 | { | ||
| 180 | /* Unmanaged falcon? */ | ||
| 181 | if (!(BIT(falcon) & sb->func->managed_falcons)) { | ||
| 182 | nvkm_error(&sb->subdev, "cannot start unmanaged falcon!\n"); | ||
| 183 | return -EINVAL; | ||
| 184 | } | ||
| 185 | |||
| 186 | return sb->func->start(sb, falcon); | ||
| 187 | } | 112 | } |
| 188 | 113 | ||
| 189 | /** | 114 | /** |
| 190 | * nvkm_secboot_is_managed() - check whether a given falcon is securely-managed | 115 | * nvkm_secboot_is_managed() - check whether a given falcon is securely-managed |
| 191 | */ | 116 | */ |
| 192 | bool | 117 | bool |
| 193 | nvkm_secboot_is_managed(struct nvkm_secboot *secboot, | 118 | nvkm_secboot_is_managed(struct nvkm_secboot *sb, enum nvkm_secboot_falcon fid) |
| 194 | enum nvkm_secboot_falcon fid) | ||
| 195 | { | 119 | { |
| 196 | if (!secboot) | 120 | if (!sb) |
| 197 | return false; | 121 | return false; |
| 198 | 122 | ||
| 199 | return secboot->func->managed_falcons & BIT(fid); | 123 | return sb->acr->managed_falcons & BIT(fid); |
| 200 | } | 124 | } |
| 201 | 125 | ||
| 202 | static int | 126 | static int |
| @@ -205,9 +129,19 @@ nvkm_secboot_oneinit(struct nvkm_subdev *subdev) | |||
| 205 | struct nvkm_secboot *sb = nvkm_secboot(subdev); | 129 | struct nvkm_secboot *sb = nvkm_secboot(subdev); |
| 206 | int ret = 0; | 130 | int ret = 0; |
| 207 | 131 | ||
| 132 | switch (sb->acr->boot_falcon) { | ||
| 133 | case NVKM_SECBOOT_FALCON_PMU: | ||
| 134 | sb->boot_falcon = subdev->device->pmu->falcon; | ||
| 135 | break; | ||
| 136 | default: | ||
| 137 | nvkm_error(subdev, "Unmanaged boot falcon %s!\n", | ||
| 138 | nvkm_secboot_falcon_name[sb->acr->boot_falcon]); | ||
| 139 | return -EINVAL; | ||
| 140 | } | ||
| 141 | |||
| 208 | /* Call chip-specific init function */ | 142 | /* Call chip-specific init function */ |
| 209 | if (sb->func->init) | 143 | if (sb->func->oneinit) |
| 210 | ret = sb->func->init(sb); | 144 | ret = sb->func->oneinit(sb); |
| 211 | if (ret) { | 145 | if (ret) { |
| 212 | nvkm_error(subdev, "Secure Boot initialization failed: %d\n", | 146 | nvkm_error(subdev, "Secure Boot initialization failed: %d\n", |
| 213 | ret); | 147 | ret); |
| @@ -249,7 +183,7 @@ nvkm_secboot = { | |||
| 249 | }; | 183 | }; |
| 250 | 184 | ||
| 251 | int | 185 | int |
| 252 | nvkm_secboot_ctor(const struct nvkm_secboot_func *func, | 186 | nvkm_secboot_ctor(const struct nvkm_secboot_func *func, struct nvkm_acr *acr, |
| 253 | struct nvkm_device *device, int index, | 187 | struct nvkm_device *device, int index, |
| 254 | struct nvkm_secboot *sb) | 188 | struct nvkm_secboot *sb) |
| 255 | { | 189 | { |
| @@ -257,22 +191,14 @@ nvkm_secboot_ctor(const struct nvkm_secboot_func *func, | |||
| 257 | 191 | ||
| 258 | nvkm_subdev_ctor(&nvkm_secboot, device, index, &sb->subdev); | 192 | nvkm_subdev_ctor(&nvkm_secboot, device, index, &sb->subdev); |
| 259 | sb->func = func; | 193 | sb->func = func; |
| 260 | 194 | sb->acr = acr; | |
| 261 | /* setup the performing falcon's base address and masks */ | 195 | acr->subdev = &sb->subdev; |
| 262 | switch (func->boot_falcon) { | ||
| 263 | case NVKM_SECBOOT_FALCON_PMU: | ||
| 264 | sb->devidx = NVKM_SUBDEV_PMU; | ||
| 265 | sb->base = 0x10a000; | ||
| 266 | break; | ||
| 267 | default: | ||
| 268 | nvkm_error(&sb->subdev, "invalid secure boot falcon\n"); | ||
| 269 | return -EINVAL; | ||
| 270 | }; | ||
| 271 | 196 | ||
| 272 | nvkm_debug(&sb->subdev, "securely managed falcons:\n"); | 197 | nvkm_debug(&sb->subdev, "securely managed falcons:\n"); |
| 273 | for_each_set_bit(fid, &sb->func->managed_falcons, | 198 | for_each_set_bit(fid, &sb->acr->managed_falcons, |
| 274 | NVKM_SECBOOT_FALCON_END) | 199 | NVKM_SECBOOT_FALCON_END) |
| 275 | nvkm_debug(&sb->subdev, "- %s\n", managed_falcons_names[fid]); | 200 | nvkm_debug(&sb->subdev, "- %s\n", |
| 201 | nvkm_secboot_falcon_name[fid]); | ||
| 276 | 202 | ||
| 277 | return 0; | 203 | return 0; |
| 278 | } | 204 | } |
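A caller-side sketch of the reworked interface: an engine init path that only resets its falcon when secure boot actually manages it. Hypothetical usage (the function name is invented), assuming sb points at a constructed nvkm_secboot; both helpers are the public functions defined above.

```c
/* Sketch: gate a falcon reset on secure-boot management. */
static int
example_start_fecs(struct nvkm_secboot *sb)
{
	int ret;

	/* Nothing to do if the falcon is not LS-managed. */
	if (!nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
		return 0;

	ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
	if (ret)
		return ret;

	return 0;
}
```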
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c index ec48e4ace37a..813c4eb0b25f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c | |||
| @@ -20,1313 +20,84 @@ | |||
| 20 | * DEALINGS IN THE SOFTWARE. | 20 | * DEALINGS IN THE SOFTWARE. |
| 21 | */ | 21 | */ |
| 22 | 22 | ||
| 23 | /* | ||
| 24 | * Secure boot is the process by which NVIDIA-signed firmware is loaded into | ||
| 25 | * some of the falcons of a GPU. For production devices this is the only way | ||
| 26 | * for the firmware to access useful (but sensitive) registers. | ||
| 27 | * | ||
| 28 | * A Falcon microprocessor supporting advanced security modes can run in one of | ||
| 29 | * three modes: | ||
| 30 | * | ||
| 31 | * - Non-secure (NS). In this mode, functionality is similar to Falcon | ||
| 32 | * architectures before security modes were introduced (pre-Maxwell), but | ||
| 33 | * capability is restricted. In particular, certain registers may be | ||
| 34 | * inaccessible for reads and/or writes, and physical memory access may be | ||
| 35 | * disabled (on certain Falcon instances). This is the only possible mode that | ||
| 36 | * can be used if you don't have microcode cryptographically signed by NVIDIA. | ||
| 37 | * | ||
| 38 | * - Heavy Secure (HS). In this mode, the microprocessor is a black box - it's | ||
| 39 | * not possible to read or write any Falcon internal state or Falcon registers | ||
| 40 | * from outside the Falcon (for example, from the host system). The only way | ||
| 41 | * to enable this mode is by loading microcode that has been signed by NVIDIA. | ||
| 42 | * (The loading process involves tagging the IMEM block as secure, writing the | ||
| 43 | * signature into a Falcon register, and starting execution. The hardware will | ||
| 44 | * validate the signature, and if valid, grant HS privileges.) | ||
| 45 | * | ||
| 46 | * - Light Secure (LS). In this mode, the microprocessor has more privileges | ||
| 47 | * than NS but fewer than HS. Some of the microprocessor state is visible to | ||
| 48 | * host software to ease debugging. The only way to enable this mode is by HS | ||
| 49 | * microcode enabling LS mode. Some privileges available to HS mode are not | ||
| 50 | * available here. LS mode is introduced in GM20x. | ||
| 51 | * | ||
| 52 | * Secure boot consists in temporarily switching a HS-capable falcon (typically | ||
| 53 | * PMU) into HS mode in order to validate the LS firmwares of managed falcons, | ||
| 54 | * load them, and switch managed falcons into LS mode. Once secure boot | ||
| 55 | * completes, no falcon remains in HS mode. | ||
| 56 | * | ||
| 57 | * Secure boot requires a write-protected memory region (WPR) which can only be | ||
| 58 | * written by the secure falcon. On dGPU, the driver sets up the WPR region in | ||
| 59 | * video memory. On Tegra, it is set up by the bootloader and its location and | ||
| 60 | * size are written into memory controller registers. | ||
| 61 | * | ||
| 62 | * The secure boot process takes place as follows: | ||
| 63 | * | ||
| 64 | * 1) A LS blob is constructed that contains all the LS firmwares we want to | ||
| 65 | * load, along with their signatures and bootloaders. | ||
| 66 | * | ||
| 67 | * 2) A HS blob (also called ACR) is created that contains the signed HS | ||
| 68 | * firmware in charge of loading the LS firmwares into their respective | ||
| 69 | * falcons. | ||
| 70 | * | ||
| 71 | * 3) The HS blob is loaded (via its own bootloader) and executed on the | ||
| 72 | * HS-capable falcon. It authenticates itself, switches the secure falcon to | ||
| 73 | * HS mode and sets up the WPR region around the LS blob (dGPU) or copies the | ||
| 74 | * LS blob into the WPR region (Tegra). | ||
| 75 | * | ||
| 76 | * 4) The LS blob is now secure from all external tampering. The HS falcon | ||
| 77 | * checks the signatures of the LS firmwares and, if valid, switches the | ||
| 78 | * managed falcons to LS mode and makes them ready to run the LS firmware. | ||
| 79 | * | ||
| 80 | * 5) The managed falcons remain in LS mode and can be started. | ||
| 81 | * | ||
| 82 | */ | ||
| 83 | 23 | ||
| 84 | #include "priv.h" | 24 | #include "acr.h" |
| 25 | #include "gm200.h" | ||
| 85 | 26 | ||
| 86 | #include <core/gpuobj.h> | 27 | #include <core/gpuobj.h> |
| 87 | #include <core/firmware.h> | ||
| 88 | #include <subdev/fb.h> | 28 | #include <subdev/fb.h> |
| 89 | 29 | #include <engine/falcon.h> | |
| 90 | enum { | 30 | #include <subdev/mc.h> |
| 91 | FALCON_DMAIDX_UCODE = 0, | ||
| 92 | FALCON_DMAIDX_VIRT = 1, | ||
| 93 | FALCON_DMAIDX_PHYS_VID = 2, | ||
| 94 | FALCON_DMAIDX_PHYS_SYS_COH = 3, | ||
| 95 | FALCON_DMAIDX_PHYS_SYS_NCOH = 4, | ||
| 96 | }; | ||
| 97 | |||
| 98 | /** | ||
| 99 | * struct fw_bin_header - header of firmware files | ||
| 100 | * @bin_magic: always 0x3b1d14f0 | ||
| 101 | * @bin_ver: version of the bin format | ||
| 102 | * @bin_size: entire image size including this header | ||
| 103 | * @header_offset: offset of the firmware/bootloader header in the file | ||
| 104 | * @data_offset: offset of the firmware/bootloader payload in the file | ||
| 105 | * @data_size: size of the payload | ||
| 106 | * | ||
| 107 | * This header is located at the beginning of the HS firmware and HS bootloader | ||
| 108 | * files, to describe where the headers and data can be found. | ||
| 109 | */ | ||
| 110 | struct fw_bin_header { | ||
| 111 | u32 bin_magic; | ||
| 112 | u32 bin_ver; | ||
| 113 | u32 bin_size; | ||
| 114 | u32 header_offset; | ||
| 115 | u32 data_offset; | ||
| 116 | u32 data_size; | ||
| 117 | }; | ||
| 118 | |||
| 119 | /** | ||
| 120 | * struct fw_bl_desc - firmware bootloader descriptor | ||
| 121 | * @start_tag: starting tag of bootloader | ||
| 122 | * @desc_dmem_load_off: DMEM offset of flcn_bl_dmem_desc | ||
| 123 | * @code_off: offset of code section | ||
| 124 | * @code_size: size of code section | ||
| 125 | * @data_off: offset of data section | ||
| 126 | * @data_size: size of data section | ||
| 127 | * | ||
| 128 | * This structure is embedded in bootloader firmware files to describe the | ||
| 129 | * IMEM and DMEM layout expected by the bootloader. | ||
| 130 | */ | ||
| 131 | struct fw_bl_desc { | ||
| 132 | u32 start_tag; | ||
| 133 | u32 dmem_load_off; | ||
| 134 | u32 code_off; | ||
| 135 | u32 code_size; | ||
| 136 | u32 data_off; | ||
| 137 | u32 data_size; | ||
| 138 | }; | ||
| 139 | |||
| 140 | |||
| 141 | /* | ||
| 142 | * | ||
| 143 | * LS blob structures | ||
| 144 | * | ||
| 145 | */ | ||
| 146 | |||
| 147 | /** | ||
| 148 | * struct lsf_ucode_desc - LS falcon signatures | ||
| 149 | * @prd_keys: signature to use when the GPU is in production mode | ||
| 150 | * @dbg_keys: signature to use when the GPU is in debug mode | ||
| 151 | * @b_prd_present: whether the production key is present | ||
| 152 | * @b_dbg_present: whether the debug key is present | ||
| 153 | * @falcon_id: ID of the falcon the ucode applies to | ||
| 154 | * | ||
| 155 | * Directly loaded from a signature file. | ||
| 156 | */ | ||
| 157 | struct lsf_ucode_desc { | ||
| 158 | u8 prd_keys[2][16]; | ||
| 159 | u8 dbg_keys[2][16]; | ||
| 160 | u32 b_prd_present; | ||
| 161 | u32 b_dbg_present; | ||
| 162 | u32 falcon_id; | ||
| 163 | }; | ||
| 164 | |||
| 165 | /** | ||
| 166 | * struct lsf_lsb_header - LS firmware header | ||
| 167 | * @signature: signature to verify the firmware against | ||
| 168 | * @ucode_off: offset of the ucode blob in the WPR region. The ucode | ||
| 169 | * blob contains the bootloader, code and data of the | ||
| 170 | * LS falcon | ||
| 171 | * @ucode_size: size of the ucode blob, including bootloader | ||
| 172 | * @data_size: size of the ucode blob data | ||
| 173 | * @bl_code_size: size of the bootloader code | ||
| 174 | * @bl_imem_off: offset in imem of the bootloader | ||
| 175 | * @bl_data_off: offset of the bootloader data in WPR region | ||
| 176 | * @bl_data_size: size of the bootloader data | ||
| 177 | * @app_code_off: offset of the app code relative to ucode_off | ||
| 178 | * @app_code_size: size of the app code | ||
| 179 | * @app_data_off: offset of the app data relative to ucode_off | ||
| 180 | * @app_data_size: size of the app data | ||
| 181 | * @flags: flags for the secure bootloader | ||
| 182 | * | ||
| 183 | * This structure is written into the WPR region for each managed falcon. Each | ||
| 184 | * instance is referenced by the lsb_offset member of the corresponding | ||
| 185 | * lsf_wpr_header. | ||
| 186 | */ | ||
| 187 | struct lsf_lsb_header { | ||
| 188 | struct lsf_ucode_desc signature; | ||
| 189 | u32 ucode_off; | ||
| 190 | u32 ucode_size; | ||
| 191 | u32 data_size; | ||
| 192 | u32 bl_code_size; | ||
| 193 | u32 bl_imem_off; | ||
| 194 | u32 bl_data_off; | ||
| 195 | u32 bl_data_size; | ||
| 196 | u32 app_code_off; | ||
| 197 | u32 app_code_size; | ||
| 198 | u32 app_data_off; | ||
| 199 | u32 app_data_size; | ||
| 200 | u32 flags; | ||
| 201 | #define LSF_FLAG_LOAD_CODE_AT_0 1 | ||
| 202 | #define LSF_FLAG_DMACTL_REQ_CTX 4 | ||
| 203 | #define LSF_FLAG_FORCE_PRIV_LOAD 8 | ||
| 204 | }; | ||
| 205 | |||
| 206 | /** | ||
| 207 | * struct lsf_wpr_header - LS blob WPR Header | ||
| 208 | * @falcon_id: LS falcon ID | ||
| 209 | * @lsb_offset: offset of the lsb_lsf_header in the WPR region | ||
| 210 | * @bootstrap_owner: secure falcon responsible for bootstrapping the LS falcon | ||
| 211 | * @lazy_bootstrap: skip bootstrapping by ACR | ||
| 212 | * @status: bootstrapping status | ||
| 213 | * | ||
| 214 | * An array of these is written at the beginning of the WPR region, one for | ||
| 215 | * each managed falcon. The array is terminated by an instance whose falcon_id | ||
| 216 | * is LSF_FALCON_ID_INVALID. | ||
| 217 | */ | ||
| 218 | struct lsf_wpr_header { | ||
| 219 | u32 falcon_id; | ||
| 220 | u32 lsb_offset; | ||
| 221 | u32 bootstrap_owner; | ||
| 222 | u32 lazy_bootstrap; | ||
| 223 | u32 status; | ||
| 224 | #define LSF_IMAGE_STATUS_NONE 0 | ||
| 225 | #define LSF_IMAGE_STATUS_COPY 1 | ||
| 226 | #define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2 | ||
| 227 | #define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3 | ||
| 228 | #define LSF_IMAGE_STATUS_VALIDATION_DONE 4 | ||
| 229 | #define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5 | ||
| 230 | #define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6 | ||
| 231 | }; | ||
| 232 | |||
| 233 | |||
| 234 | /** | ||
| 235 | * struct ls_ucode_img_desc - descriptor of firmware image | ||
| 236 | * @descriptor_size: size of this descriptor | ||
| 237 | * @image_size: size of the whole image | ||
| 238 | * @bootloader_start_offset: start offset of the bootloader in ucode image | ||
| 239 | * @bootloader_size: size of the bootloader | ||
| 240 | * @bootloader_imem_offset: start offset of the bootloader in IMEM | ||
| 241 | * @bootloader_entry_point: entry point of the bootloader in IMEM | ||
| 242 | * @app_start_offset: start offset of the LS firmware | ||
| 243 | * @app_size: size of the LS firmware's code and data | ||
| 244 | * @app_imem_offset: offset of the app in IMEM | ||
| 245 | * @app_imem_entry: entry point of the app in IMEM | ||
| 246 | * @app_dmem_offset: offset of the data in DMEM | ||
| 247 | * @app_resident_code_offset: offset of app code from app_start_offset | ||
| 248 | * @app_resident_code_size: size of the code | ||
| 249 | * @app_resident_data_offset: offset of data from app_start_offset | ||
| 250 | * @app_resident_data_size: size of data | ||
| 251 | * | ||
| 252 | * A firmware image contains the code, data, and bootloader of a given LS | ||
| 253 | * falcon in a single blob. This structure describes where everything is. | ||
| 254 | * | ||
| 255 | * This can be generated from a (bootloader, code, data) set if they have | ||
| 256 | * been loaded separately, or come directly from a file. | ||
| 257 | */ | ||
| 258 | struct ls_ucode_img_desc { | ||
| 259 | u32 descriptor_size; | ||
| 260 | u32 image_size; | ||
| 261 | u32 tools_version; | ||
| 262 | u32 app_version; | ||
| 263 | char date[64]; | ||
| 264 | u32 bootloader_start_offset; | ||
| 265 | u32 bootloader_size; | ||
| 266 | u32 bootloader_imem_offset; | ||
| 267 | u32 bootloader_entry_point; | ||
| 268 | u32 app_start_offset; | ||
| 269 | u32 app_size; | ||
| 270 | u32 app_imem_offset; | ||
| 271 | u32 app_imem_entry; | ||
| 272 | u32 app_dmem_offset; | ||
| 273 | u32 app_resident_code_offset; | ||
| 274 | u32 app_resident_code_size; | ||
| 275 | u32 app_resident_data_offset; | ||
| 276 | u32 app_resident_data_size; | ||
| 277 | u32 nb_overlays; | ||
| 278 | struct {u32 start; u32 size; } load_ovl[64]; | ||
| 279 | u32 compressed; | ||
| 280 | }; | ||
| 281 | |||
| 282 | /** | ||
| 283 | * struct ls_ucode_img - temporary storage for loaded LS firmwares | ||
| 284 | * @node: to link within lsf_ucode_mgr | ||
| 285 | * @falcon_id: ID of the falcon this LS firmware is for | ||
| 286 | * @ucode_desc: loaded or generated map of ucode_data | ||
| 287 | * @ucode_header: header of the firmware | ||
| 288 | * @ucode_data: firmware payload (code and data) | ||
| 289 | * @ucode_size: size in bytes of data in ucode_data | ||
| 290 | * @wpr_header: WPR header to be written to the LS blob | ||
| 291 | * @lsb_header: LSB header to be written to the LS blob | ||
| 292 | * | ||
| 293 | * Preparing the WPR LS blob requires information about all the LS firmwares | ||
| 294 | * (size, etc) to be known. This structure contains all the data of one LS | ||
| 295 | * firmware. | ||
| 296 | */ | ||
| 297 | struct ls_ucode_img { | ||
| 298 | struct list_head node; | ||
| 299 | enum nvkm_secboot_falcon falcon_id; | ||
| 300 | |||
| 301 | struct ls_ucode_img_desc ucode_desc; | ||
| 302 | u32 *ucode_header; | ||
| 303 | u8 *ucode_data; | ||
| 304 | u32 ucode_size; | ||
| 305 | |||
| 306 | struct lsf_wpr_header wpr_header; | ||
| 307 | struct lsf_lsb_header lsb_header; | ||
| 308 | }; | ||
| 309 | |||
| 310 | /** | ||
| 311 | * struct ls_ucode_mgr - manager for all LS falcon firmwares | ||
| 312 | * @count: number of managed LS falcons | ||
| 313 | * @wpr_size: size of the required WPR region in bytes | ||
| 314 | * @img_list: linked list of lsf_ucode_img | ||
| 315 | */ | ||
| 316 | struct ls_ucode_mgr { | ||
| 317 | u16 count; | ||
| 318 | u32 wpr_size; | ||
| 319 | struct list_head img_list; | ||
| 320 | }; | ||
| 321 | |||
| 322 | |||
| 323 | /* | ||
| 324 | * | ||
| 325 | * HS blob structures | ||
| 326 | * | ||
| 327 | */ | ||
| 328 | |||
| 329 | /** | ||
| 330 | * struct hsf_fw_header - HS firmware descriptor | ||
| 331 | * @sig_dbg_offset: offset of the debug signature | ||
| 332 | * @sig_dbg_size: size of the debug signature | ||
| 333 | * @sig_prod_offset: offset of the production signature | ||
| 334 | * @sig_prod_size: size of the production signature | ||
| 335 | * @patch_loc: offset of the offset (sic) of where the signature is | ||
| 336 | * @patch_sig: offset of the offset (sic) to add to sig_*_offset | ||
| 337 | * @hdr_offset: offset of the load header (see struct hs_load_header) | ||
| 338 | * @hdr_size: size of above header | ||
| 339 | * | ||
| 340 | * This structure is embedded in the HS firmware image at | ||
| 341 | * hs_bin_hdr.header_offset. | ||
| 342 | */ | ||
| 343 | struct hsf_fw_header { | ||
| 344 | u32 sig_dbg_offset; | ||
| 345 | u32 sig_dbg_size; | ||
| 346 | u32 sig_prod_offset; | ||
| 347 | u32 sig_prod_size; | ||
| 348 | u32 patch_loc; | ||
| 349 | u32 patch_sig; | ||
| 350 | u32 hdr_offset; | ||
| 351 | u32 hdr_size; | ||
| 352 | }; | ||
| 353 | |||
| 354 | /** | ||
| 355 | * struct hsf_load_header - HS firmware load header | ||
| 356 | */ | ||
| 357 | struct hsf_load_header { | ||
| 358 | u32 non_sec_code_off; | ||
| 359 | u32 non_sec_code_size; | ||
| 360 | u32 data_dma_base; | ||
| 361 | u32 data_size; | ||
| 362 | u32 num_apps; | ||
| 363 | struct { | ||
| 364 | u32 sec_code_off; | ||
| 365 | u32 sec_code_size; | ||
| 366 | } app[0]; | ||
| 367 | }; | ||
| 368 | |||
| 369 | /** | ||
| 370 | * Convenience function to duplicate a firmware file in memory and check that | ||
| 371 | * it has the required minimum size. | ||
| 372 | */ | ||
| 373 | static void * | ||
| 374 | gm200_secboot_load_firmware(struct nvkm_subdev *subdev, const char *name, | ||
| 375 | size_t min_size) | ||
| 376 | { | ||
| 377 | const struct firmware *fw; | ||
| 378 | void *blob; | ||
| 379 | int ret; | ||
| 380 | |||
| 381 | ret = nvkm_firmware_get(subdev->device, name, &fw); | ||
| 382 | if (ret) | ||
| 383 | return ERR_PTR(ret); | ||
| 384 | if (fw->size < min_size) { | ||
| 385 | nvkm_error(subdev, "%s is smaller than expected size %zu\n", | ||
| 386 | name, min_size); | ||
| 387 | nvkm_firmware_put(fw); | ||
| 388 | return ERR_PTR(-EINVAL); | ||
| 389 | } | ||
| 390 | blob = kmemdup(fw->data, fw->size, GFP_KERNEL); | ||
| 391 | nvkm_firmware_put(fw); | ||
| 392 | if (!blob) | ||
| 393 | return ERR_PTR(-ENOMEM); | ||
| 394 | |||
| 395 | return blob; | ||
| 396 | } | ||
| 397 | |||
| 398 | |||
| 399 | /* | ||
| 400 | * Low-secure blob creation | ||
| 401 | */ | ||
| 402 | |||
| 403 | #define BL_DESC_BLK_SIZE 256 | ||
| 404 | /** | ||
| 405 | * Build a ucode image and descriptor from provided bootloader, code and data. | ||
| 406 | * | ||
| 407 | * @bl: bootloader image, including 16-byte descriptor | ||
| 408 | * @code: LS firmware code segment | ||
| 409 | * @data: LS firmware data segment | ||
| 410 | * @desc: ucode descriptor to be written | ||
| 411 | * | ||
| 412 | * Return: allocated ucode image with corresponding descriptor information. desc | ||
| 413 | * is also updated to contain the right offsets within the returned image. | ||
| 414 | */ | ||
| 415 | static void * | ||
| 416 | ls_ucode_img_build(const struct firmware *bl, const struct firmware *code, | ||
| 417 | const struct firmware *data, struct ls_ucode_img_desc *desc) | ||
| 418 | { | ||
| 419 | struct fw_bin_header *bin_hdr = (void *)bl->data; | ||
| 420 | struct fw_bl_desc *bl_desc = (void *)bl->data + bin_hdr->header_offset; | ||
| 421 | void *bl_data = (void *)bl->data + bin_hdr->data_offset; | ||
| 422 | u32 pos = 0; | ||
| 423 | void *image; | ||
| 424 | |||
| 425 | desc->bootloader_start_offset = pos; | ||
| 426 | desc->bootloader_size = ALIGN(bl_desc->code_size, sizeof(u32)); | ||
| 427 | desc->bootloader_imem_offset = bl_desc->start_tag * 256; | ||
| 428 | desc->bootloader_entry_point = bl_desc->start_tag * 256; | ||
| 429 | |||
| 430 | pos = ALIGN(pos + desc->bootloader_size, BL_DESC_BLK_SIZE); | ||
| 431 | desc->app_start_offset = pos; | ||
| 432 | desc->app_size = ALIGN(code->size, BL_DESC_BLK_SIZE) + | ||
| 433 | ALIGN(data->size, BL_DESC_BLK_SIZE); | ||
| 434 | desc->app_imem_offset = 0; | ||
| 435 | desc->app_imem_entry = 0; | ||
| 436 | desc->app_dmem_offset = 0; | ||
| 437 | desc->app_resident_code_offset = 0; | ||
| 438 | desc->app_resident_code_size = ALIGN(code->size, BL_DESC_BLK_SIZE); | ||
| 439 | |||
| 440 | pos = ALIGN(pos + desc->app_resident_code_size, BL_DESC_BLK_SIZE); | ||
| 441 | desc->app_resident_data_offset = pos - desc->app_start_offset; | ||
| 442 | desc->app_resident_data_size = ALIGN(data->size, BL_DESC_BLK_SIZE); | ||
| 443 | |||
| 444 | desc->image_size = ALIGN(bl_desc->code_size, BL_DESC_BLK_SIZE) + | ||
| 445 | desc->app_size; | ||
| 446 | |||
| 447 | image = kzalloc(desc->image_size, GFP_KERNEL); | ||
| 448 | if (!image) | ||
| 449 | return ERR_PTR(-ENOMEM); | ||
| 450 | |||
| 451 | memcpy(image + desc->bootloader_start_offset, bl_data, | ||
| 452 | bl_desc->code_size); | ||
| 453 | memcpy(image + desc->app_start_offset, code->data, code->size); | ||
| 454 | memcpy(image + desc->app_start_offset + desc->app_resident_data_offset, | ||
| 455 | data->data, data->size); | ||
| 456 | |||
| 457 | return image; | ||
| 458 | } | ||
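For reference, the image produced by ls_ucode_img_build() above ends up with the layout below, reconstructed from the function itself (every section is padded to BL_DESC_BLK_SIZE, i.e. 256 bytes):

```c
/*
 * Layout of the returned image, using the offsets recorded in desc:
 *
 *   0 ............................... bootloader code (bl_data)
 *   app_start_offset ................ LS firmware code (code->data)
 *   app_start_offset
 *     + app_resident_data_offset .... LS firmware data (data->data)
 *   image_size ...................... end of image (256B-aligned)
 */
```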
| 459 | |||
| 460 | /** | ||
| 461 | * ls_ucode_img_load_generic() - load and prepare a LS ucode image | ||
| 462 | * | ||
| 463 | * Load the LS microcode, bootloader and signature and pack them into a single | ||
| 464 | * blob. Also generate the corresponding ucode descriptor. | ||
| 465 | */ | ||
| 466 | static int | ||
| 467 | ls_ucode_img_load_generic(struct nvkm_subdev *subdev, | ||
| 468 | struct ls_ucode_img *img, const char *falcon_name, | ||
| 469 | const u32 falcon_id) | ||
| 470 | { | ||
| 471 | const struct firmware *bl, *code, *data; | ||
| 472 | struct lsf_ucode_desc *lsf_desc; | ||
| 473 | char f[64]; | ||
| 474 | int ret; | ||
| 475 | |||
| 476 | img->ucode_header = NULL; | ||
| 477 | |||
| 478 | snprintf(f, sizeof(f), "gr/%s_bl", falcon_name); | ||
| 479 | ret = nvkm_firmware_get(subdev->device, f, &bl); | ||
| 480 | if (ret) | ||
| 481 | goto error; | ||
| 482 | |||
| 483 | snprintf(f, sizeof(f), "gr/%s_inst", falcon_name); | ||
| 484 | ret = nvkm_firmware_get(subdev->device, f, &code); | ||
| 485 | if (ret) | ||
| 486 | goto free_bl; | ||
| 487 | |||
| 488 | snprintf(f, sizeof(f), "gr/%s_data", falcon_name); | ||
| 489 | ret = nvkm_firmware_get(subdev->device, f, &data); | ||
| 490 | if (ret) | ||
| 491 | goto free_inst; | ||
| 492 | |||
| 493 | img->ucode_data = ls_ucode_img_build(bl, code, data, | ||
| 494 | &img->ucode_desc); | ||
| 495 | if (IS_ERR(img->ucode_data)) { | ||
| 496 | ret = PTR_ERR(img->ucode_data); | ||
| 497 | goto free_data; | ||
| 498 | } | ||
| 499 | img->ucode_size = img->ucode_desc.image_size; | ||
| 500 | |||
| 501 | snprintf(f, sizeof(f), "gr/%s_sig", falcon_name); | ||
| 502 | lsf_desc = gm200_secboot_load_firmware(subdev, f, sizeof(*lsf_desc)); | ||
| 503 | if (IS_ERR(lsf_desc)) { | ||
| 504 | ret = PTR_ERR(lsf_desc); | ||
| 505 | goto free_image; | ||
| 506 | } | ||
| 507 | /* not needed? the signature should already have the right value */ | ||
| 508 | lsf_desc->falcon_id = falcon_id; | ||
| 509 | memcpy(&img->lsb_header.signature, lsf_desc, sizeof(*lsf_desc)); | ||
| 510 | img->falcon_id = lsf_desc->falcon_id; | ||
| 511 | kfree(lsf_desc); | ||
| 512 | |||
| 513 | /* success path - only free requested firmware files */ | ||
| 514 | goto free_data; | ||
| 515 | |||
| 516 | free_image: | ||
| 517 | kfree(img->ucode_data); | ||
| 518 | free_data: | ||
| 519 | nvkm_firmware_put(data); | ||
| 520 | free_inst: | ||
| 521 | nvkm_firmware_put(code); | ||
| 522 | free_bl: | ||
| 523 | nvkm_firmware_put(bl); | ||
| 524 | error: | ||
| 525 | return ret; | ||
| 526 | } | ||
| 527 | |||
| 528 | typedef int (*lsf_load_func)(struct nvkm_subdev *, struct ls_ucode_img *); | ||
| 529 | |||
| 530 | static int | ||
| 531 | ls_ucode_img_load_fecs(struct nvkm_subdev *subdev, struct ls_ucode_img *img) | ||
| 532 | { | ||
| 533 | return ls_ucode_img_load_generic(subdev, img, "fecs", | ||
| 534 | NVKM_SECBOOT_FALCON_FECS); | ||
| 535 | } | ||
| 536 | |||
| 537 | static int | ||
| 538 | ls_ucode_img_load_gpccs(struct nvkm_subdev *subdev, struct ls_ucode_img *img) | ||
| 539 | { | ||
| 540 | return ls_ucode_img_load_generic(subdev, img, "gpccs", | ||
| 541 | NVKM_SECBOOT_FALCON_GPCCS); | ||
| 542 | } | ||
| 543 | |||
| 544 | /** | ||
| 545 | * ls_ucode_img_load() - create a lsf_ucode_img and load it | ||
| 546 | */ | ||
| 547 | static struct ls_ucode_img * | ||
| 548 | ls_ucode_img_load(struct nvkm_subdev *subdev, lsf_load_func load_func) | ||
| 549 | { | ||
| 550 | struct ls_ucode_img *img; | ||
| 551 | int ret; | ||
| 552 | |||
| 553 | img = kzalloc(sizeof(*img), GFP_KERNEL); | ||
| 554 | if (!img) | ||
| 555 | return ERR_PTR(-ENOMEM); | ||
| 556 | |||
| 557 | ret = load_func(subdev, img); | ||
| 558 | if (ret) { | ||
| 559 | kfree(img); | ||
| 560 | return ERR_PTR(ret); | ||
| 561 | } | ||
| 562 | |||
| 563 | return img; | ||
| 564 | } | ||
| 565 | |||
| 566 | static const lsf_load_func lsf_load_funcs[] = { | ||
| 567 | [NVKM_SECBOOT_FALCON_END] = NULL, /* reserve enough space */ | ||
| 568 | [NVKM_SECBOOT_FALCON_FECS] = ls_ucode_img_load_fecs, | ||
| 569 | [NVKM_SECBOOT_FALCON_GPCCS] = ls_ucode_img_load_gpccs, | ||
| 570 | }; | ||
| 571 | |||
| 572 | /** | ||
| 573 | * ls_ucode_img_populate_bl_desc() - populate a DMEM BL descriptor for LS image | ||
| 574 | * @img: ucode image to generate against | ||
| 575 | * @desc: descriptor to populate | ||
| 576 | * @sb: secure boot state to use for base addresses | ||
| 577 | * | ||
| 578 | * Populate the DMEM BL descriptor with the information contained in an | ||
| 579 | * ls_ucode_img_desc. | ||
| 580 | * | ||
| 581 | */ | ||
| 582 | static void | ||
| 583 | ls_ucode_img_populate_bl_desc(struct ls_ucode_img *img, u64 wpr_addr, | ||
| 584 | struct gm200_flcn_bl_desc *desc) | ||
| 585 | { | ||
| 586 | struct ls_ucode_img_desc *pdesc = &img->ucode_desc; | ||
| 587 | u64 addr_base; | ||
| 588 | |||
| 589 | addr_base = wpr_addr + img->lsb_header.ucode_off + | ||
| 590 | pdesc->app_start_offset; | ||
| 591 | |||
| 592 | memset(desc, 0, sizeof(*desc)); | ||
| 593 | desc->ctx_dma = FALCON_DMAIDX_UCODE; | ||
| 594 | desc->code_dma_base.lo = lower_32_bits( | ||
| 595 | (addr_base + pdesc->app_resident_code_offset)); | ||
| 596 | desc->code_dma_base.hi = upper_32_bits( | ||
| 597 | (addr_base + pdesc->app_resident_code_offset)); | ||
| 598 | desc->non_sec_code_size = pdesc->app_resident_code_size; | ||
| 599 | desc->data_dma_base.lo = lower_32_bits( | ||
| 600 | (addr_base + pdesc->app_resident_data_offset)); | ||
| 601 | desc->data_dma_base.hi = upper_32_bits( | ||
| 602 | (addr_base + pdesc->app_resident_data_offset)); | ||
| 603 | desc->data_size = pdesc->app_resident_data_size; | ||
| 604 | desc->code_entry_point = pdesc->app_imem_entry; | ||
| 605 | } | ||
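The code_dma_base and data_dma_base fields are {lo, hi} pairs of 32-bit halves. A quick sketch of the split and its inverse (the flcn64_to_u64() conversion used further down), assuming the same field layout:

    u64 addr = 0x1234567890ULL;                    /* any 64-bit DMA address */
    struct { u32 lo, hi; } base;

    base.lo = lower_32_bits(addr);                 /* 0x34567890 */
    base.hi = upper_32_bits(addr);                 /* 0x00000012 */

    u64 back = ((u64)base.hi << 32) | base.lo;     /* == addr */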
| 606 | |||
| 607 | #define LSF_LSB_HEADER_ALIGN 256 | ||
| 608 | #define LSF_BL_DATA_ALIGN 256 | ||
| 609 | #define LSF_BL_DATA_SIZE_ALIGN 256 | ||
| 610 | #define LSF_BL_CODE_SIZE_ALIGN 256 | ||
| 611 | #define LSF_UCODE_DATA_ALIGN 4096 | ||
| 612 | |||
| 613 | /** | ||
| 614 | * ls_ucode_img_fill_headers - fill the WPR and LSB headers of an image | ||
| 615 | * @gsb: secure boot device used | ||
| 616 | * @img: image to generate for | ||
| 617 | * @offset: offset in the WPR region where this image starts | ||
| 618 | * | ||
| 619 | * Allocate space in the WPR area from offset and write the WPR and LSB headers | ||
| 620 | * accordingly. | ||
| 621 | * | ||
| 622 | * Return: offset at the end of this image. | ||
| 623 | */ | ||
| 624 | static u32 | ||
| 625 | ls_ucode_img_fill_headers(struct gm200_secboot *gsb, struct ls_ucode_img *img, | ||
| 626 | u32 offset) | ||
| 627 | { | ||
| 628 | struct lsf_wpr_header *whdr = &img->wpr_header; | ||
| 629 | struct lsf_lsb_header *lhdr = &img->lsb_header; | ||
| 630 | struct ls_ucode_img_desc *desc = &img->ucode_desc; | ||
| 631 | |||
| 632 | if (img->ucode_header) { | ||
| 633 | nvkm_fatal(&gsb->base.subdev, | ||
| 634 | "images withough loader are not supported yet!\n"); | ||
| 635 | return offset; | ||
| 636 | } | ||
| 637 | |||
| 638 | /* Fill WPR header */ | ||
| 639 | whdr->falcon_id = img->falcon_id; | ||
| 640 | whdr->bootstrap_owner = gsb->base.func->boot_falcon; | ||
| 641 | whdr->status = LSF_IMAGE_STATUS_COPY; | ||
| 642 | |||
| 643 | /* Align, save off, and include an LSB header size */ | ||
| 644 | offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN); | ||
| 645 | whdr->lsb_offset = offset; | ||
| 646 | offset += sizeof(struct lsf_lsb_header); | ||
| 647 | |||
| 648 | /* | ||
| 649 | * Align, save off, and include the original (static) ucode | ||
| 650 | * image size | ||
| 651 | */ | ||
| 652 | offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN); | ||
| 653 | lhdr->ucode_off = offset; | ||
| 654 | offset += img->ucode_size; | ||
| 655 | |||
| 656 | /* | ||
| 657 | * For falcons that use a boot loader (BL), we append a loader | ||
| 658 | * desc structure on the end of the ucode image and consider | ||
| 659 | * this the boot loader data. The host will then copy the loader | ||
| 660 | * desc args to this space within the WPR region (before locking | ||
| 661 | * down) and the HS bin will then copy them to DMEM 0 for the | ||
| 662 | * loader. | ||
| 663 | */ | ||
| 664 | lhdr->bl_code_size = ALIGN(desc->bootloader_size, | ||
| 665 | LSF_BL_CODE_SIZE_ALIGN); | ||
| 666 | lhdr->ucode_size = ALIGN(desc->app_resident_data_offset, | ||
| 667 | LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size; | ||
| 668 | lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) + | ||
| 669 | lhdr->bl_code_size - lhdr->ucode_size; | ||
| 670 | /* | ||
| 671 | * Though the BL is located at offset 0 of the image, its VA | ||
| 672 | * differs to make sure that it doesn't collide with the actual | ||
| 673 | * OS VA range | ||
| 674 | */ | ||
| 675 | lhdr->bl_imem_off = desc->bootloader_imem_offset; | ||
| 676 | lhdr->app_code_off = desc->app_start_offset + | ||
| 677 | desc->app_resident_code_offset; | ||
| 678 | lhdr->app_code_size = desc->app_resident_code_size; | ||
| 679 | lhdr->app_data_off = desc->app_start_offset + | ||
| 680 | desc->app_resident_data_offset; | ||
| 681 | lhdr->app_data_size = desc->app_resident_data_size; | ||
| 682 | |||
| 683 | lhdr->flags = 0; | ||
| 684 | if (img->falcon_id == gsb->base.func->boot_falcon) | ||
| 685 | lhdr->flags = LSF_FLAG_DMACTL_REQ_CTX; | ||
| 686 | |||
| 687 | /* GPCCS will be loaded using PRI */ | ||
| 688 | if (img->falcon_id == NVKM_SECBOOT_FALCON_GPCCS) | ||
| 689 | lhdr->flags |= LSF_FLAG_FORCE_PRIV_LOAD; | ||
| 690 | |||
| 691 | /* Align (size bloat) and save off BL descriptor size */ | ||
| 692 | lhdr->bl_data_size = ALIGN(sizeof(struct gm200_flcn_bl_desc), | ||
| 693 | LSF_BL_DATA_SIZE_ALIGN); | ||
| 694 | /* | ||
| 695 | * Align, save off, and include the additional BL data | ||
| 696 | */ | ||
| 697 | offset = ALIGN(offset, LSF_BL_DATA_ALIGN); | ||
| 698 | lhdr->bl_data_off = offset; | ||
| 699 | offset += lhdr->bl_data_size; | ||
| 700 | |||
| 701 | return offset; | ||
| 702 | } | ||
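Condensed, the bookkeeping above lays one image out as three aligned pieces: LSB header, ucode payload, BL data. A sketch of the same arithmetic for a single image starting at offset 0, using the constants defined above:

    u32 offset = 0, lsb_off, ucode_off, bl_data_off;

    offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);          /* 256 */
    lsb_off = offset;
    offset += sizeof(struct lsf_lsb_header);

    offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);          /* 4096 */
    ucode_off = offset;
    offset += ucode_size;                                  /* image payload */

    offset = ALIGN(offset, LSF_BL_DATA_ALIGN);             /* 256 */
    bl_data_off = offset;
    offset += ALIGN(sizeof(struct gm200_flcn_bl_desc),
                    LSF_BL_DATA_SIZE_ALIGN);
    /* offset now marks where the next image may start */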
| 703 | |||
| 704 | static void | ||
| 705 | ls_ucode_mgr_init(struct ls_ucode_mgr *mgr) | ||
| 706 | { | ||
| 707 | memset(mgr, 0, sizeof(*mgr)); | ||
| 708 | INIT_LIST_HEAD(&mgr->img_list); | ||
| 709 | } | ||
| 710 | |||
| 711 | static void | ||
| 712 | ls_ucode_mgr_cleanup(struct ls_ucode_mgr *mgr) | ||
| 713 | { | ||
| 714 | struct ls_ucode_img *img, *t; | ||
| 715 | |||
| 716 | list_for_each_entry_safe(img, t, &mgr->img_list, node) { | ||
| 717 | kfree(img->ucode_data); | ||
| 718 | kfree(img->ucode_header); | ||
| 719 | kfree(img); | ||
| 720 | } | ||
| 721 | } | ||
| 722 | |||
| 723 | static void | ||
| 724 | ls_ucode_mgr_add_img(struct ls_ucode_mgr *mgr, struct ls_ucode_img *img) | ||
| 725 | { | ||
| 726 | mgr->count++; | ||
| 727 | list_add_tail(&img->node, &mgr->img_list); | ||
| 728 | } | ||
| 729 | |||
| 730 | /** | ||
| 731 | * ls_ucode_mgr_fill_headers - fill WPR and LSB headers of all managed images | ||
| 732 | */ | ||
| 733 | static void | ||
| 734 | ls_ucode_mgr_fill_headers(struct gm200_secboot *gsb, struct ls_ucode_mgr *mgr) | ||
| 735 | { | ||
| 736 | struct ls_ucode_img *img; | ||
| 737 | u32 offset; | ||
| 738 | |||
| 739 | /* | ||
| 740 | * Start with an array of WPR headers at the base of the WPR. | ||
| 741 | * The expectation here is that the secure falcon will do a single DMA | ||
| 742 | * read of this array and cache it internally so it's ok to pack these. | ||
| 743 | * Also, we add 1 to the falcon count to indicate the end of the array. | ||
| 744 | */ | ||
| 745 | offset = sizeof(struct lsf_wpr_header) * (mgr->count + 1); | ||
| 746 | |||
| 747 | /* | ||
| 748 | * Walk the managed falcons, accounting for the LSB structs | ||
| 749 | * as well as the ucode images. | ||
| 750 | */ | ||
| 751 | list_for_each_entry(img, &mgr->img_list, node) { | ||
| 752 | offset = ls_ucode_img_fill_headers(gsb, img, offset); | ||
| 753 | } | ||
| 754 | |||
| 755 | mgr->wpr_size = offset; | ||
| 756 | } | ||
| 757 | |||
| 758 | /** | ||
| 759 | * ls_ucode_mgr_write_wpr - write the WPR blob contents | ||
| 760 | */ | ||
| 761 | static int | ||
| 762 | ls_ucode_mgr_write_wpr(struct gm200_secboot *gsb, struct ls_ucode_mgr *mgr, | ||
| 763 | struct nvkm_gpuobj *wpr_blob) | ||
| 764 | { | ||
| 765 | struct ls_ucode_img *img; | ||
| 766 | u32 pos = 0; | ||
| 767 | |||
| 768 | nvkm_kmap(wpr_blob); | ||
| 769 | |||
| 770 | list_for_each_entry(img, &mgr->img_list, node) { | ||
| 771 | nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header, | ||
| 772 | sizeof(img->wpr_header)); | ||
| 773 | |||
| 774 | nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset, | ||
| 775 | &img->lsb_header, sizeof(img->lsb_header)); | ||
| 776 | |||
| 777 | /* Generate and write BL descriptor */ | ||
| 778 | if (!img->ucode_header) { | ||
| 779 | u8 desc[gsb->func->bl_desc_size]; | ||
| 780 | struct gm200_flcn_bl_desc gdesc; | ||
| 781 | |||
| 782 | ls_ucode_img_populate_bl_desc(img, gsb->wpr_addr, | ||
| 783 | &gdesc); | ||
| 784 | gsb->func->fixup_bl_desc(&gdesc, &desc); | ||
| 785 | nvkm_gpuobj_memcpy_to(wpr_blob, | ||
| 786 | img->lsb_header.bl_data_off, | ||
| 787 | &desc, gsb->func->bl_desc_size); | ||
| 788 | } | ||
| 789 | |||
| 790 | /* Copy ucode */ | ||
| 791 | nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off, | ||
| 792 | img->ucode_data, img->ucode_size); | ||
| 793 | |||
| 794 | pos += sizeof(img->wpr_header); | ||
| 795 | } | ||
| 796 | |||
| 797 | nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID); | ||
| 798 | |||
| 799 | nvkm_done(wpr_blob); | ||
| 800 | |||
| 801 | return 0; | ||
| 802 | } | ||
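The final nvkm_wo32() writes a terminator entry: the header array was sized for count + 1 slots in ls_ucode_mgr_fill_headers(), so there is always room for it, and a consumer can walk the array without knowing the count. A hypothetical reader-side view:

    int i;

    for (i = 0; ; i++) {
            /* headers are packed, so entry i lives at i * sizeof(hdr) */
            struct lsf_wpr_header hdr = read_header(wpr_base, i);  /* hypothetical */
            if (hdr.falcon_id == NVKM_SECBOOT_FALCON_INVALID)
                    break;
            /* ... process the image described by hdr ... */
    }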
| 803 | |||
| 804 | /* Both size and address of WPR need to be 128K-aligned */ | ||
| 805 | #define WPR_ALIGNMENT 0x20000 | ||
| 806 | /** | ||
| 807 | * gm200_secboot_prepare_ls_blob() - prepare the LS blob | ||
| 808 | * | ||
| 809 | * For each securely managed falcon, load the FW, signatures and bootloaders and | ||
| 810 | * prepare a ucode blob. Then, compute the offsets in the WPR region for each | ||
| 811 | * blob, and finally write the headers and ucode blobs into a GPU object that | ||
| 812 | * will be copied into the WPR region by the HS firmware. | ||
| 813 | */ | ||
| 814 | static int | ||
| 815 | gm200_secboot_prepare_ls_blob(struct gm200_secboot *gsb) | ||
| 816 | { | ||
| 817 | struct nvkm_secboot *sb = &gsb->base; | ||
| 818 | struct nvkm_device *device = sb->subdev.device; | ||
| 819 | struct ls_ucode_mgr mgr; | ||
| 820 | int falcon_id; | ||
| 821 | int ret; | ||
| 822 | |||
| 823 | ls_ucode_mgr_init(&mgr); | ||
| 824 | |||
| 825 | /* Load all LS blobs */ | ||
| 826 | for_each_set_bit(falcon_id, &gsb->base.func->managed_falcons, | ||
| 827 | NVKM_SECBOOT_FALCON_END) { | ||
| 828 | struct ls_ucode_img *img; | ||
| 829 | |||
| 830 | img = ls_ucode_img_load(&sb->subdev, lsf_load_funcs[falcon_id]); | ||
| 831 | |||
| 832 | if (IS_ERR(img)) { | ||
| 833 | ret = PTR_ERR(img); | ||
| 834 | goto cleanup; | ||
| 835 | } | ||
| 836 | ls_ucode_mgr_add_img(&mgr, img); | ||
| 837 | } | ||
| 838 | |||
| 839 | /* | ||
| 840 | * Fill the WPR and LSF headers with the right offsets and compute | ||
| 841 | * required WPR size | ||
| 842 | */ | ||
| 843 | ls_ucode_mgr_fill_headers(gsb, &mgr); | ||
| 844 | mgr.wpr_size = ALIGN(mgr.wpr_size, WPR_ALIGNMENT); | ||
| 845 | |||
| 846 | /* Allocate GPU object that will contain the WPR region */ | ||
| 847 | ret = nvkm_gpuobj_new(device, mgr.wpr_size, WPR_ALIGNMENT, false, NULL, | ||
| 848 | &gsb->ls_blob); | ||
| 849 | if (ret) | ||
| 850 | goto cleanup; | ||
| 851 | |||
| 852 | nvkm_debug(&sb->subdev, "%d managed LS falcons, WPR size is %d bytes\n", | ||
| 853 | mgr.count, mgr.wpr_size); | ||
| 854 | |||
| 855 | /* If WPR address and size are not fixed, set them to fit the LS blob */ | ||
| 856 | if (!gsb->wpr_size) { | ||
| 857 | gsb->wpr_addr = gsb->ls_blob->addr; | ||
| 858 | gsb->wpr_size = gsb->ls_blob->size; | ||
| 859 | } | ||
| 860 | |||
| 861 | /* Write LS blob */ | ||
| 862 | ret = ls_ucode_mgr_write_wpr(gsb, &mgr, gsb->ls_blob); | ||
| 863 | if (ret) | ||
| 864 | nvkm_gpuobj_del(&gsb->ls_blob); | ||
| 865 | |||
| 866 | cleanup: | ||
| 867 | ls_ucode_mgr_cleanup(&mgr); | ||
| 868 | |||
| 869 | return ret; | ||
| 870 | } | ||
| 871 | |||
| 872 | /* | ||
| 873 | * High-secure blob creation | ||
| 874 | */ | ||
| 875 | |||
| 876 | /** | ||
| 877 | * gm200_secboot_hsf_patch_signature() - patch HS blob with correct signature | ||
| 878 | */ | ||
| 879 | static void | ||
| 880 | gm200_secboot_hsf_patch_signature(struct gm200_secboot *gsb, void *acr_image) | ||
| 881 | { | ||
| 882 | struct nvkm_secboot *sb = &gsb->base; | ||
| 883 | struct fw_bin_header *hsbin_hdr = acr_image; | ||
| 884 | struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset; | ||
| 885 | void *hs_data = acr_image + hsbin_hdr->data_offset; | ||
| 886 | void *sig; | ||
| 887 | u32 sig_size; | ||
| 888 | |||
| 889 | /* Falcon in debug or production mode? */ | ||
| 890 | if ((nvkm_rd32(sb->subdev.device, sb->base + 0xc08) >> 20) & 0x1) { | ||
| 891 | sig = acr_image + fw_hdr->sig_dbg_offset; | ||
| 892 | sig_size = fw_hdr->sig_dbg_size; | ||
| 893 | } else { | ||
| 894 | sig = acr_image + fw_hdr->sig_prod_offset; | ||
| 895 | sig_size = fw_hdr->sig_prod_size; | ||
| 896 | } | ||
| 897 | |||
| 898 | /* Patch signature */ | ||
| 899 | memcpy(hs_data + fw_hdr->patch_loc, sig + fw_hdr->patch_sig, sig_size); | ||
| 900 | } | ||
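Bit 20 of the register at base + 0xc08 distinguishes debug from production mode; the ACR image carries one signature for each, and only the matching one is copied over the patch location. Restated compactly as a sketch over the same fields:

    bool debug = (nvkm_rd32(device, sb->base + 0xc08) >> 20) & 0x1;
    void *sig = acr_image + (debug ? fw_hdr->sig_dbg_offset
                                   : fw_hdr->sig_prod_offset);
    u32 sig_size = debug ? fw_hdr->sig_dbg_size : fw_hdr->sig_prod_size;

    memcpy(hs_data + fw_hdr->patch_loc, sig + fw_hdr->patch_sig, sig_size);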
| 901 | |||
| 902 | /** | ||
| 903 | * gm200_secboot_populate_hsf_bl_desc() - populate BL descriptor for HS image | ||
| 904 | */ | ||
| 905 | static void | ||
| 906 | gm200_secboot_populate_hsf_bl_desc(void *acr_image, | ||
| 907 | struct gm200_flcn_bl_desc *bl_desc) | ||
| 908 | { | ||
| 909 | struct fw_bin_header *hsbin_hdr = acr_image; | ||
| 910 | struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset; | ||
| 911 | struct hsf_load_header *load_hdr = acr_image + fw_hdr->hdr_offset; | ||
| 912 | |||
| 913 | /* | ||
| 914 | * Descriptor for the bootloader that will load the ACR image into | ||
| 915 | * IMEM/DMEM memory. | ||
| 916 | */ | ||
| 917 | fw_hdr = acr_image + hsbin_hdr->header_offset; | ||
| 918 | load_hdr = acr_image + fw_hdr->hdr_offset; | ||
| 919 | memset(bl_desc, 0, sizeof(*bl_desc)); | ||
| 920 | bl_desc->ctx_dma = FALCON_DMAIDX_VIRT; | ||
| 921 | bl_desc->non_sec_code_off = load_hdr->non_sec_code_off; | ||
| 922 | bl_desc->non_sec_code_size = load_hdr->non_sec_code_size; | ||
| 923 | bl_desc->sec_code_off = load_hdr->app[0].sec_code_off; | ||
| 924 | bl_desc->sec_code_size = load_hdr->app[0].sec_code_size; | ||
| 925 | bl_desc->code_entry_point = 0; | ||
| 926 | /* | ||
| 927 | * We need to set code_dma_base to the virtual address of the acr_blob, | ||
| 928 | * and add this address to data_dma_base before writing it into DMEM | ||
| 929 | */ | ||
| 930 | bl_desc->code_dma_base.lo = 0; | ||
| 931 | bl_desc->data_dma_base.lo = load_hdr->data_dma_base; | ||
| 932 | bl_desc->data_size = load_hdr->data_size; | ||
| 933 | } | ||
| 934 | |||
| 935 | /** | ||
| 936 | * gm200_secboot_prepare_hs_blob - load and prepare a HS blob and BL descriptor | ||
| 937 | * | ||
| 938 | * @gsb: secure boot instance to prepare for | ||
| 939 | * @fw: name of the HS firmware to load | ||
| 940 | * @blob: pointer to gpuobj that will be allocated to receive the HS FW payload | ||
| 941 | * @bl_desc: pointer to the BL descriptor to write for this firmware | ||
| 942 | * @patch: whether we should patch the HS descriptor (only for HS loaders) | ||
| 943 | */ | ||
| 944 | static int | ||
| 945 | gm200_secboot_prepare_hs_blob(struct gm200_secboot *gsb, const char *fw, | ||
| 946 | struct nvkm_gpuobj **blob, | ||
| 947 | struct gm200_flcn_bl_desc *bl_desc, bool patch) | ||
| 948 | { | ||
| 949 | struct nvkm_subdev *subdev = &gsb->base.subdev; | ||
| 950 | void *acr_image; | ||
| 951 | struct fw_bin_header *hsbin_hdr; | ||
| 952 | struct hsf_fw_header *fw_hdr; | ||
| 953 | void *acr_data; | ||
| 954 | struct hsf_load_header *load_hdr; | ||
| 955 | struct hsflcn_acr_desc *desc; | ||
| 956 | int ret; | ||
| 957 | |||
| 958 | acr_image = gm200_secboot_load_firmware(subdev, fw, 0); | ||
| 959 | if (IS_ERR(acr_image)) | ||
| 960 | return PTR_ERR(acr_image); | ||
| 961 | hsbin_hdr = acr_image; | ||
| 962 | |||
| 963 | /* Patch signature */ | ||
| 964 | gm200_secboot_hsf_patch_signature(gsb, acr_image); | ||
| 965 | |||
| 966 | acr_data = acr_image + hsbin_hdr->data_offset; | ||
| 967 | |||
| 968 | /* Patch descriptor? */ | ||
| 969 | if (patch) { | ||
| 970 | fw_hdr = acr_image + hsbin_hdr->header_offset; | ||
| 971 | load_hdr = acr_image + fw_hdr->hdr_offset; | ||
| 972 | desc = acr_data + load_hdr->data_dma_base; | ||
| 973 | gsb->func->fixup_hs_desc(gsb, desc); | ||
| 974 | } | ||
| 975 | |||
| 976 | /* Generate HS BL descriptor */ | ||
| 977 | gm200_secboot_populate_hsf_bl_desc(acr_image, bl_desc); | ||
| 978 | |||
| 979 | /* Create ACR blob and copy HS data to it */ | ||
| 980 | ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256), | ||
| 981 | 0x1000, false, NULL, blob); | ||
| 982 | if (ret) | ||
| 983 | goto cleanup; | ||
| 984 | |||
| 985 | nvkm_kmap(*blob); | ||
| 986 | nvkm_gpuobj_memcpy_to(*blob, 0, acr_data, hsbin_hdr->data_size); | ||
| 987 | nvkm_done(*blob); | ||
| 988 | |||
| 989 | cleanup: | ||
| 990 | kfree(acr_image); | ||
| 991 | |||
| 992 | return ret; | ||
| 993 | } | ||
| 994 | |||
| 995 | /* | ||
| 996 | * High-secure bootloader blob creation | ||
| 997 | */ | ||
| 998 | |||
| 999 | static int | ||
| 1000 | gm200_secboot_prepare_hsbl_blob(struct gm200_secboot *gsb) | ||
| 1001 | { | ||
| 1002 | struct nvkm_subdev *subdev = &gsb->base.subdev; | ||
| 1003 | |||
| 1004 | gsb->hsbl_blob = gm200_secboot_load_firmware(subdev, "acr/bl", 0); | ||
| 1005 | if (IS_ERR(gsb->hsbl_blob)) { | ||
| 1006 | int ret = PTR_ERR(gsb->hsbl_blob); | ||
| 1007 | |||
| 1008 | gsb->hsbl_blob = NULL; | ||
| 1009 | return ret; | ||
| 1010 | } | ||
| 1011 | |||
| 1012 | return 0; | ||
| 1013 | } | ||
| 1014 | 31 | ||
| 1015 | /** | 32 | /** |
| 1016 | * gm20x_secboot_prepare_blobs - load blobs common to all GM20X GPUs. | 33 | * gm200_secboot_run_blob() - run the given high-secure blob |
| 1017 | * | 34 | * |
| 1018 | * This includes the LS blob, HS ucode loading blob, and HS bootloader. | ||
| 1019 | * | ||
| 1020 | * The HS ucode unload blob is only used on dGPU. | ||
| 1021 | */ | 35 | */ |
| 1022 | int | 36 | int |
| 1023 | gm20x_secboot_prepare_blobs(struct gm200_secboot *gsb) | 37 | gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob) |
| 1024 | { | ||
| 1025 | int ret; | ||
| 1026 | |||
| 1027 | /* Load and prepare the managed falcon's firmwares */ | ||
| 1028 | if (!gsb->ls_blob) { | ||
| 1029 | ret = gm200_secboot_prepare_ls_blob(gsb); | ||
| 1030 | if (ret) | ||
| 1031 | return ret; | ||
| 1032 | } | ||
| 1033 | |||
| 1034 | /* Load the HS firmware that will load the LS firmwares */ | ||
| 1035 | if (!gsb->acr_load_blob) { | ||
| 1036 | ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_load", | ||
| 1037 | &gsb->acr_load_blob, | ||
| 1038 | &gsb->acr_load_bl_desc, true); | ||
| 1039 | if (ret) | ||
| 1040 | return ret; | ||
| 1041 | } | ||
| 1042 | |||
| 1043 | /* Load the HS firmware bootloader */ | ||
| 1044 | if (!gsb->hsbl_blob) { | ||
| 1045 | ret = gm200_secboot_prepare_hsbl_blob(gsb); | ||
| 1046 | if (ret) | ||
| 1047 | return ret; | ||
| 1048 | } | ||
| 1049 | |||
| 1050 | return 0; | ||
| 1051 | } | ||
| 1052 | |||
| 1053 | static int | ||
| 1054 | gm200_secboot_prepare_blobs(struct gm200_secboot *gsb) | ||
| 1055 | { | ||
| 1056 | int ret; | ||
| 1057 | |||
| 1058 | ret = gm20x_secboot_prepare_blobs(gsb); | ||
| 1059 | if (ret) | ||
| 1060 | return ret; | ||
| 1061 | |||
| 1062 | /* dGPU only: load the HS firmware that unprotects the WPR region */ | ||
| 1063 | if (!gsb->acr_unload_blob) { | ||
| 1064 | ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_unload", | ||
| 1065 | &gsb->acr_unload_blob, | ||
| 1066 | &gsb->acr_unload_bl_desc, false); | ||
| 1067 | if (ret) | ||
| 1068 | return ret; | ||
| 1069 | } | ||
| 1070 | |||
| 1071 | return 0; | ||
| 1072 | } | ||
| 1073 | |||
| 1074 | static int | ||
| 1075 | gm200_secboot_blobs_ready(struct gm200_secboot *gsb) | ||
| 1076 | { | 38 | { |
| 39 | struct gm200_secboot *gsb = gm200_secboot(sb); | ||
| 1077 | struct nvkm_subdev *subdev = &gsb->base.subdev; | 40 | struct nvkm_subdev *subdev = &gsb->base.subdev; |
| 41 | struct nvkm_falcon *falcon = gsb->base.boot_falcon; | ||
| 42 | struct nvkm_vma vma; | ||
| 1078 | int ret; | 43 | int ret; |
| 1079 | 44 | ||
| 1080 | /* firmware already loaded, nothing to do... */ | 45 | ret = nvkm_falcon_get(falcon, subdev); |
| 1081 | if (gsb->firmware_ok) | ||
| 1082 | return 0; | ||
| 1083 | |||
| 1084 | ret = gsb->func->prepare_blobs(gsb); | ||
| 1085 | if (ret) { | ||
| 1086 | nvkm_error(subdev, "failed to load secure firmware\n"); | ||
| 1087 | return ret; | ||
| 1088 | } | ||
| 1089 | |||
| 1090 | gsb->firmware_ok = true; | ||
| 1091 | |||
| 1092 | return 0; | ||
| 1093 | } | ||
| 1094 | |||
| 1095 | |||
| 1096 | /* | ||
| 1097 | * Secure Boot Execution | ||
| 1098 | */ | ||
| 1099 | |||
| 1100 | /** | ||
| 1101 | * gm200_secboot_load_hs_bl() - load HS bootloader into DMEM and IMEM | ||
| 1102 | */ | ||
| 1103 | static void | ||
| 1104 | gm200_secboot_load_hs_bl(struct gm200_secboot *gsb, void *data, u32 data_size) | ||
| 1105 | { | ||
| 1106 | struct nvkm_device *device = gsb->base.subdev.device; | ||
| 1107 | struct fw_bin_header *hdr = gsb->hsbl_blob; | ||
| 1108 | struct fw_bl_desc *hsbl_desc = gsb->hsbl_blob + hdr->header_offset; | ||
| 1109 | void *blob_data = gsb->hsbl_blob + hdr->data_offset; | ||
| 1110 | void *hsbl_code = blob_data + hsbl_desc->code_off; | ||
| 1111 | void *hsbl_data = blob_data + hsbl_desc->data_off; | ||
| 1112 | u32 code_size = ALIGN(hsbl_desc->code_size, 256); | ||
| 1113 | const u32 base = gsb->base.base; | ||
| 1114 | u32 blk; | ||
| 1115 | u32 tag; | ||
| 1116 | int i; | ||
| 1117 | |||
| 1118 | /* | ||
| 1119 | * Copy HS bootloader data | ||
| 1120 | */ | ||
| 1121 | nvkm_wr32(device, base + 0x1c0, (0x00000000 | (0x1 << 24))); | ||
| 1122 | for (i = 0; i < hsbl_desc->data_size / 4; i++) | ||
| 1123 | nvkm_wr32(device, base + 0x1c4, ((u32 *)hsbl_data)[i]); | ||
| 1124 | |||
| 1125 | /* | ||
| 1126 | * Copy HS bootloader interface structure where the HS descriptor | ||
| 1127 | * expects it to be | ||
| 1128 | */ | ||
| 1129 | nvkm_wr32(device, base + 0x1c0, | ||
| 1130 | (hsbl_desc->dmem_load_off | (0x1 << 24))); | ||
| 1131 | for (i = 0; i < data_size / 4; i++) | ||
| 1132 | nvkm_wr32(device, base + 0x1c4, ((u32 *)data)[i]); | ||
| 1133 | |||
| 1134 | /* Copy HS bootloader code to end of IMEM */ | ||
| 1135 | blk = (nvkm_rd32(device, base + 0x108) & 0x1ff) - (code_size >> 8); | ||
| 1136 | tag = hsbl_desc->start_tag; | ||
| 1137 | nvkm_wr32(device, base + 0x180, ((blk & 0xff) << 8) | (0x1 << 24)); | ||
| 1138 | for (i = 0; i < code_size / 4; i++) { | ||
| 1139 | /* write new tag every 256B */ | ||
| 1140 | if ((i & 0x3f) == 0) { | ||
| 1141 | nvkm_wr32(device, base + 0x188, tag & 0xffff); | ||
| 1142 | tag++; | ||
| 1143 | } | ||
| 1144 | nvkm_wr32(device, base + 0x184, ((u32 *)hsbl_code)[i]); | ||
| 1145 | } | ||
| 1146 | nvkm_wr32(device, base + 0x188, 0); | ||
| 1147 | } | ||
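The (i & 0x3f) == 0 test in the code-copy loop fires every 64 words, i.e. every 256 bytes, the falcon's IMEM page size: each page must be tagged before its first word is written. The loop in isolation, as a sketch:

    static void imem_copy_sketch(struct nvkm_device *device, u32 base,
                                 const u32 *code, u32 code_size, u32 tag)
    {
            int i;

            for (i = 0; i < code_size / 4; i++) {
                    if ((i & 0x3f) == 0)    /* i % 64 == 0: new 256-byte page */
                            nvkm_wr32(device, base + 0x188, tag++ & 0xffff);
                    nvkm_wr32(device, base + 0x184, code[i]);
            }
    }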
| 1148 | |||
| 1149 | /** | ||
| 1150 | * gm200_secboot_setup_falcon() - set up the secure falcon for secure boot | ||
| 1151 | */ | ||
| 1152 | static int | ||
| 1153 | gm200_secboot_setup_falcon(struct gm200_secboot *gsb) | ||
| 1154 | { | ||
| 1155 | struct nvkm_device *device = gsb->base.subdev.device; | ||
| 1156 | struct fw_bin_header *hdr = gsb->hsbl_blob; | ||
| 1157 | struct fw_bl_desc *hsbl_desc = gsb->hsbl_blob + hdr->header_offset; | ||
| 1158 | /* virtual start address for boot vector */ | ||
| 1159 | u32 virt_addr = hsbl_desc->start_tag << 8; | ||
| 1160 | const u32 base = gsb->base.base; | ||
| 1161 | const u32 reg_base = base + 0xe00; | ||
| 1162 | u32 inst_loc; | ||
| 1163 | int ret; | ||
| 1164 | |||
| 1165 | ret = nvkm_secboot_falcon_reset(&gsb->base); | ||
| 1166 | if (ret) | 46 | if (ret) |
| 1167 | return ret; | 47 | return ret; |
| 1168 | 48 | ||
| 1169 | /* setup apertures - virtual */ | ||
| 1170 | nvkm_wr32(device, reg_base + 4 * (FALCON_DMAIDX_UCODE), 0x4); | ||
| 1171 | nvkm_wr32(device, reg_base + 4 * (FALCON_DMAIDX_VIRT), 0x0); | ||
| 1172 | /* setup apertures - physical */ | ||
| 1173 | nvkm_wr32(device, reg_base + 4 * (FALCON_DMAIDX_PHYS_VID), 0x4); | ||
| 1174 | nvkm_wr32(device, reg_base + 4 * (FALCON_DMAIDX_PHYS_SYS_COH), | ||
| 1175 | 0x4 | 0x1); | ||
| 1176 | nvkm_wr32(device, reg_base + 4 * (FALCON_DMAIDX_PHYS_SYS_NCOH), | ||
| 1177 | 0x4 | 0x2); | ||
| 1178 | |||
| 1179 | /* Set context */ | ||
| 1180 | if (nvkm_memory_target(gsb->inst->memory) == NVKM_MEM_TARGET_VRAM) | ||
| 1181 | inst_loc = 0x0; /* FB */ | ||
| 1182 | else | ||
| 1183 | inst_loc = 0x3; /* Non-coherent sysmem */ | ||
| 1184 | |||
| 1185 | nvkm_mask(device, base + 0x048, 0x1, 0x1); | ||
| 1186 | nvkm_wr32(device, base + 0x480, | ||
| 1187 | ((gsb->inst->addr >> 12) & 0xfffffff) | | ||
| 1188 | (inst_loc << 28) | (1 << 30)); | ||
| 1189 | |||
| 1190 | /* Set boot vector to code's starting virtual address */ | ||
| 1191 | nvkm_wr32(device, base + 0x104, virt_addr); | ||
| 1192 | |||
| 1193 | return 0; | ||
| 1194 | } | ||
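The write to base + 0x480 appears to pack three fields: the instance block's 4KiB page frame number in bits 27:0, the memory target in bits 29:28, and what is presumably a valid/trigger bit at 30. Broken out (bit meanings are an interpretation, not confirmed by the source):

    u32 ctx = ((gsb->inst->addr >> 12) & 0xfffffff) |  /* page frame */
              (inst_loc << 28) |                       /* 0 = FB, 3 = ncoh sysmem */
              (1 << 30);                               /* assumed valid bit */
    nvkm_wr32(device, base + 0x480, ctx);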
| 1195 | |||
| 1196 | /** | ||
| 1197 | * gm200_secboot_run_hs_blob() - run the given high-secure blob | ||
| 1198 | */ | ||
| 1199 | static int | ||
| 1200 | gm200_secboot_run_hs_blob(struct gm200_secboot *gsb, struct nvkm_gpuobj *blob, | ||
| 1201 | struct gm200_flcn_bl_desc *desc) | ||
| 1202 | { | ||
| 1203 | struct nvkm_vma vma; | ||
| 1204 | u64 vma_addr; | ||
| 1205 | const u32 bl_desc_size = gsb->func->bl_desc_size; | ||
| 1206 | u8 bl_desc[bl_desc_size]; | ||
| 1207 | int ret; | ||
| 1208 | |||
| 1209 | /* Map the HS firmware so the HS bootloader can see it */ | 49 | /* Map the HS firmware so the HS bootloader can see it */ |
| 1210 | ret = nvkm_gpuobj_map(blob, gsb->vm, NV_MEM_ACCESS_RW, &vma); | 50 | ret = nvkm_gpuobj_map(blob, gsb->vm, NV_MEM_ACCESS_RW, &vma); |
| 1211 | if (ret) | 51 | if (ret) { |
| 52 | nvkm_falcon_put(falcon, subdev); | ||
| 1212 | return ret; | 53 | return ret; |
| 54 | } | ||
| 1213 | 55 | ||
| 1214 | /* Add the mapping address to the DMA bases */ | 56 | /* Reset and set the falcon up */ |
| 1215 | vma_addr = flcn64_to_u64(desc->code_dma_base) + vma.offset; | 57 | ret = nvkm_falcon_reset(falcon); |
| 1216 | desc->code_dma_base.lo = lower_32_bits(vma_addr); | ||
| 1217 | desc->code_dma_base.hi = upper_32_bits(vma_addr); | ||
| 1218 | vma_addr = flcn64_to_u64(desc->data_dma_base) + vma.offset; | ||
| 1219 | desc->data_dma_base.lo = lower_32_bits(vma_addr); | ||
| 1220 | desc->data_dma_base.hi = upper_32_bits(vma_addr); | ||
| 1221 | |||
| 1222 | /* Fixup the BL header */ | ||
| 1223 | gsb->func->fixup_bl_desc(desc, &bl_desc); | ||
| 1224 | |||
| 1225 | /* Reset the falcon and make it ready to run the HS bootloader */ | ||
| 1226 | ret = gm200_secboot_setup_falcon(gsb); | ||
| 1227 | if (ret) | 58 | if (ret) |
| 1228 | goto done; | 59 | goto end; |
| 60 | nvkm_falcon_bind_context(falcon, gsb->inst); | ||
| 1229 | 61 | ||
| 1230 | /* Load the HS bootloader into the falcon's IMEM/DMEM */ | 62 | /* Load the HS bootloader into the falcon's IMEM/DMEM */ |
| 1231 | gm200_secboot_load_hs_bl(gsb, &bl_desc, bl_desc_size); | 63 | ret = sb->acr->func->load(sb->acr, &gsb->base, blob, vma.offset); |
| 1232 | |||
| 1233 | /* Start the HS bootloader */ | ||
| 1234 | ret = nvkm_secboot_falcon_run(&gsb->base); | ||
| 1235 | if (ret) | 64 | if (ret) |
| 1236 | goto done; | 65 | goto end; |
| 1237 | |||
| 1238 | done: | ||
| 1239 | /* Restore the original DMA addresses */ | ||
| 1240 | vma_addr = flcn64_to_u64(desc->code_dma_base) - vma.offset; | ||
| 1241 | desc->code_dma_base.lo = lower_32_bits(vma_addr); | ||
| 1242 | desc->code_dma_base.hi = upper_32_bits(vma_addr); | ||
| 1243 | vma_addr = flcn64_to_u64(desc->data_dma_base) - vma.offset; | ||
| 1244 | desc->data_dma_base.lo = lower_32_bits(vma_addr); | ||
| 1245 | desc->data_dma_base.hi = upper_32_bits(vma_addr); | ||
| 1246 | |||
| 1247 | /* We don't need the ACR firmware anymore */ | ||
| 1248 | nvkm_gpuobj_unmap(&vma); | ||
| 1249 | 66 | ||
| 1250 | return ret; | 67 | /* Disable interrupts as we will poll for the HALT bit */ |
| 1251 | } | 68 | nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, false); |
| 1252 | 69 | ||
| 1253 | /* | 70 | /* Set default error value in mailbox register */ |
| 1254 | * gm200_secboot_reset() - execute secure boot from the prepared state | 71 | nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5); |
| 1255 | * | ||
| 1256 | * Load the HS bootloader and ask the falcon to run it. This will in turn | ||
| 1257 | * load the HS firmware and run it, so once the falcon stops all the managed | ||
| 1258 | * falcons should have their LS firmware loaded and be ready to run. | ||
| 1259 | */ | ||
| 1260 | int | ||
| 1261 | gm200_secboot_reset(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon) | ||
| 1262 | { | ||
| 1263 | struct gm200_secboot *gsb = gm200_secboot(sb); | ||
| 1264 | int ret; | ||
| 1265 | 72 | ||
| 1266 | /* Make sure all blobs are ready */ | 73 | /* Start the HS bootloader */ |
| 1267 | ret = gm200_secboot_blobs_ready(gsb); | 74 | nvkm_falcon_set_start_addr(falcon, sb->acr->start_address); |
| 75 | nvkm_falcon_start(falcon); | ||
| 76 | ret = nvkm_falcon_wait_for_halt(falcon, 100); | ||
| 1268 | if (ret) | 77 | if (ret) |
| 1269 | return ret; | ||
| 1270 | |||
| 1271 | /* | ||
| 1272 | * Dummy GM200 implementation: perform secure boot each time we are | ||
| 1273 | * called on FECS. Since only FECS and GPCCS are managed and started | ||
| 1274 | * together, this ought to be safe. | ||
| 1275 | * | ||
| 1276 | * Once we have proper PMU firmware and support, this will be changed | ||
| 1277 | * to a proper call to the PMU method. | ||
| 1278 | */ | ||
| 1279 | if (falcon != NVKM_SECBOOT_FALCON_FECS) | ||
| 1280 | goto end; | 78 | goto end; |
| 1281 | 79 | ||
| 1282 | /* If WPR is set and we have an unload blob, run it to unlock WPR */ | 80 | /* If mailbox register contains an error code, then ACR has failed */ |
| 1283 | if (gsb->acr_unload_blob && | 81 | ret = nvkm_falcon_rd32(falcon, 0x040); |
| 1284 | gsb->falcon_state[NVKM_SECBOOT_FALCON_FECS] != NON_SECURE) { | 82 | if (ret) { |
| 1285 | ret = gm200_secboot_run_hs_blob(gsb, gsb->acr_unload_blob, | 83 | nvkm_error(subdev, "ACR boot failed, ret 0x%08x", ret); |
| 1286 | &gsb->acr_unload_bl_desc); | 84 | ret = -EINVAL; |
| 1287 | if (ret) | 85 | goto end; |
| 1288 | return ret; | ||
| 1289 | } | 86 | } |
| 1290 | 87 | ||
| 1291 | /* Reload all managed falcons */ | ||
| 1292 | ret = gm200_secboot_run_hs_blob(gsb, gsb->acr_load_blob, | ||
| 1293 | &gsb->acr_load_bl_desc); | ||
| 1294 | if (ret) | ||
| 1295 | return ret; | ||
| 1296 | |||
| 1297 | end: | 88 | end: |
| 1298 | gsb->falcon_state[falcon] = RESET; | 89 | /* Reenable interrupts */ |
| 1299 | return 0; | 90 | nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, true); |
| 1300 | } | ||
| 1301 | 91 | ||
| 1302 | int | 92 | /* We don't need the ACR firmware anymore */ |
| 1303 | gm200_secboot_start(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon) | 93 | nvkm_gpuobj_unmap(&vma); |
| 1304 | { | 94 | nvkm_falcon_put(falcon, subdev); |
| 1305 | struct gm200_secboot *gsb = gm200_secboot(sb); | ||
| 1306 | int base; | ||
| 1307 | |||
| 1308 | switch (falcon) { | ||
| 1309 | case NVKM_SECBOOT_FALCON_FECS: | ||
| 1310 | base = 0x409000; | ||
| 1311 | break; | ||
| 1312 | case NVKM_SECBOOT_FALCON_GPCCS: | ||
| 1313 | base = 0x41a000; | ||
| 1314 | break; | ||
| 1315 | default: | ||
| 1316 | nvkm_error(&sb->subdev, "cannot start unhandled falcon!\n"); | ||
| 1317 | return -EINVAL; | ||
| 1318 | } | ||
| 1319 | |||
| 1320 | nvkm_wr32(sb->subdev.device, base + 0x130, 0x00000002); | ||
| 1321 | gsb->falcon_state[falcon] = RUNNING; | ||
| 1322 | 95 | ||
| 1323 | return 0; | 96 | return ret; |
| 1324 | } | 97 | } |
| 1325 | 98 | ||
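The rewritten run_blob path replaces the hand-rolled BL descriptor juggling with a mailbox handshake: seed falcon register 0x040 with a sentinel, start the falcon, poll for halt, and treat any nonzero readback as an ACR error code. Just the handshake, as a sketch (the 100 passed to nvkm_falcon_wait_for_halt() is assumed to be a millisecond timeout):

    nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5);    /* sentinel */
    nvkm_falcon_start(falcon);
    ret = nvkm_falcon_wait_for_halt(falcon, 100);
    if (!ret) {
            u32 mbox = nvkm_falcon_rd32(falcon, 0x040);
            if (mbox)                               /* ACR reported an error */
                    ret = -EINVAL;
    }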
| 1326 | |||
| 1327 | |||
| 1328 | int | 99 | int |
| 1329 | gm200_secboot_init(struct nvkm_secboot *sb) | 100 | gm200_secboot_oneinit(struct nvkm_secboot *sb) |
| 1330 | { | 101 | { |
| 1331 | struct gm200_secboot *gsb = gm200_secboot(sb); | 102 | struct gm200_secboot *gsb = gm200_secboot(sb); |
| 1332 | struct nvkm_device *device = sb->subdev.device; | 103 | struct nvkm_device *device = sb->subdev.device; |
| @@ -1361,24 +132,22 @@ gm200_secboot_init(struct nvkm_secboot *sb) | |||
| 1361 | nvkm_wo32(gsb->inst, 0x20c, upper_32_bits(vm_area_len - 1)); | 132 | nvkm_wo32(gsb->inst, 0x20c, upper_32_bits(vm_area_len - 1)); |
| 1362 | nvkm_done(gsb->inst); | 133 | nvkm_done(gsb->inst); |
| 1363 | 134 | ||
| 135 | if (sb->acr->func->oneinit) { | ||
| 136 | ret = sb->acr->func->oneinit(sb->acr, sb); | ||
| 137 | if (ret) | ||
| 138 | return ret; | ||
| 139 | } | ||
| 140 | |||
| 1364 | return 0; | 141 | return 0; |
| 1365 | } | 142 | } |
| 1366 | 143 | ||
| 1367 | static int | 144 | int |
| 1368 | gm200_secboot_fini(struct nvkm_secboot *sb, bool suspend) | 145 | gm200_secboot_fini(struct nvkm_secboot *sb, bool suspend) |
| 1369 | { | 146 | { |
| 1370 | struct gm200_secboot *gsb = gm200_secboot(sb); | ||
| 1371 | int ret = 0; | 147 | int ret = 0; |
| 1372 | int i; | ||
| 1373 | 148 | ||
| 1374 | /* Run the unload blob to unprotect the WPR region */ | 149 | if (sb->acr->func->fini) |
| 1375 | if (gsb->acr_unload_blob && | 150 | ret = sb->acr->func->fini(sb->acr, sb, suspend); |
| 1376 | gsb->falcon_state[NVKM_SECBOOT_FALCON_FECS] != NON_SECURE) | ||
| 1377 | ret = gm200_secboot_run_hs_blob(gsb, gsb->acr_unload_blob, | ||
| 1378 | &gsb->acr_unload_bl_desc); | ||
| 1379 | |||
| 1380 | for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++) | ||
| 1381 | gsb->falcon_state[i] = NON_SECURE; | ||
| 1382 | 151 | ||
| 1383 | return ret; | 152 | return ret; |
| 1384 | } | 153 | } |
| @@ -1388,11 +157,7 @@ gm200_secboot_dtor(struct nvkm_secboot *sb) | |||
| 1388 | { | 157 | { |
| 1389 | struct gm200_secboot *gsb = gm200_secboot(sb); | 158 | struct gm200_secboot *gsb = gm200_secboot(sb); |
| 1390 | 159 | ||
| 1391 | nvkm_gpuobj_del(&gsb->acr_unload_blob); | 160 | sb->acr->func->dtor(sb->acr); |
| 1392 | |||
| 1393 | kfree(gsb->hsbl_blob); | ||
| 1394 | nvkm_gpuobj_del(&gsb->acr_load_blob); | ||
| 1395 | nvkm_gpuobj_del(&gsb->ls_blob); | ||
| 1396 | 161 | ||
| 1397 | nvkm_vm_ref(NULL, &gsb->vm, gsb->pgd); | 162 | nvkm_vm_ref(NULL, &gsb->vm, gsb->pgd); |
| 1398 | nvkm_gpuobj_del(&gsb->pgd); | 163 | nvkm_gpuobj_del(&gsb->pgd); |
| @@ -1405,50 +170,9 @@ gm200_secboot_dtor(struct nvkm_secboot *sb) | |||
| 1405 | static const struct nvkm_secboot_func | 170 | static const struct nvkm_secboot_func |
| 1406 | gm200_secboot = { | 171 | gm200_secboot = { |
| 1407 | .dtor = gm200_secboot_dtor, | 172 | .dtor = gm200_secboot_dtor, |
| 1408 | .init = gm200_secboot_init, | 173 | .oneinit = gm200_secboot_oneinit, |
| 1409 | .fini = gm200_secboot_fini, | 174 | .fini = gm200_secboot_fini, |
| 1410 | .reset = gm200_secboot_reset, | 175 | .run_blob = gm200_secboot_run_blob, |
| 1411 | .start = gm200_secboot_start, | ||
| 1412 | .managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS) | | ||
| 1413 | BIT(NVKM_SECBOOT_FALCON_GPCCS), | ||
| 1414 | .boot_falcon = NVKM_SECBOOT_FALCON_PMU, | ||
| 1415 | }; | ||
| 1416 | |||
| 1417 | /** | ||
| 1418 | * gm200_fixup_bl_desc - just copy the BL descriptor | ||
| 1419 | * | ||
| 1420 | * Use the GM200 descriptor format by default. | ||
| 1421 | */ | ||
| 1422 | static void | ||
| 1423 | gm200_secboot_fixup_bl_desc(const struct gm200_flcn_bl_desc *desc, void *ret) | ||
| 1424 | { | ||
| 1425 | memcpy(ret, desc, sizeof(*desc)); | ||
| 1426 | } | ||
| 1427 | |||
| 1428 | static void | ||
| 1429 | gm200_secboot_fixup_hs_desc(struct gm200_secboot *gsb, | ||
| 1430 | struct hsflcn_acr_desc *desc) | ||
| 1431 | { | ||
| 1432 | desc->ucode_blob_base = gsb->ls_blob->addr; | ||
| 1433 | desc->ucode_blob_size = gsb->ls_blob->size; | ||
| 1434 | |||
| 1435 | desc->wpr_offset = 0; | ||
| 1436 | |||
| 1437 | /* WPR region information for the HS binary to set up */ | ||
| 1438 | desc->wpr_region_id = 1; | ||
| 1439 | desc->regions.no_regions = 1; | ||
| 1440 | desc->regions.region_props[0].region_id = 1; | ||
| 1441 | desc->regions.region_props[0].start_addr = gsb->wpr_addr >> 8; | ||
| 1442 | desc->regions.region_props[0].end_addr = | ||
| 1443 | (gsb->wpr_addr + gsb->wpr_size) >> 8; | ||
| 1444 | } | ||
| 1445 | |||
| 1446 | static const struct gm200_secboot_func | ||
| 1447 | gm200_secboot_func = { | ||
| 1448 | .bl_desc_size = sizeof(struct gm200_flcn_bl_desc), | ||
| 1449 | .fixup_bl_desc = gm200_secboot_fixup_bl_desc, | ||
| 1450 | .fixup_hs_desc = gm200_secboot_fixup_hs_desc, | ||
| 1451 | .prepare_blobs = gm200_secboot_prepare_blobs, | ||
| 1452 | }; | 176 | }; |
| 1453 | 177 | ||
| 1454 | int | 178 | int |
| @@ -1457,6 +181,12 @@ gm200_secboot_new(struct nvkm_device *device, int index, | |||
| 1457 | { | 181 | { |
| 1458 | int ret; | 182 | int ret; |
| 1459 | struct gm200_secboot *gsb; | 183 | struct gm200_secboot *gsb; |
| 184 | struct nvkm_acr *acr; | ||
| 185 | |||
| 186 | acr = acr_r361_new(BIT(NVKM_SECBOOT_FALCON_FECS) | | ||
| 187 | BIT(NVKM_SECBOOT_FALCON_GPCCS)); | ||
| 188 | if (IS_ERR(acr)) | ||
| 189 | return PTR_ERR(acr); | ||
| 1460 | 190 | ||
| 1461 | gsb = kzalloc(sizeof(*gsb), GFP_KERNEL); | 191 | gsb = kzalloc(sizeof(*gsb), GFP_KERNEL); |
| 1462 | if (!gsb) { | 192 | if (!gsb) { |
| @@ -1465,15 +195,14 @@ gm200_secboot_new(struct nvkm_device *device, int index, | |||
| 1465 | } | 195 | } |
| 1466 | *psb = &gsb->base; | 196 | *psb = &gsb->base; |
| 1467 | 197 | ||
| 1468 | ret = nvkm_secboot_ctor(&gm200_secboot, device, index, &gsb->base); | 198 | ret = nvkm_secboot_ctor(&gm200_secboot, acr, device, index, &gsb->base); |
| 1469 | if (ret) | 199 | if (ret) |
| 1470 | return ret; | 200 | return ret; |
| 1471 | 201 | ||
| 1472 | gsb->func = &gm200_secboot_func; | ||
| 1473 | |||
| 1474 | return 0; | 202 | return 0; |
| 1475 | } | 203 | } |
| 1476 | 204 | ||
| 205 | |||
| 1477 | MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin"); | 206 | MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin"); |
| 1478 | MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin"); | 207 | MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin"); |
| 1479 | MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin"); | 208 | MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin"); |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h new file mode 100644 index 000000000000..45adf1a3bc20 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h | |||
| @@ -0,0 +1,43 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 20 | * DEALINGS IN THE SOFTWARE. | ||
| 21 | */ | ||
| 22 | |||
| 23 | #ifndef __NVKM_SECBOOT_GM200_H__ | ||
| 24 | #define __NVKM_SECBOOT_GM200_H__ | ||
| 25 | |||
| 26 | #include "priv.h" | ||
| 27 | |||
| 28 | struct gm200_secboot { | ||
| 29 | struct nvkm_secboot base; | ||
| 30 | |||
| 31 | /* Instance block & address space used for HS FW execution */ | ||
| 32 | struct nvkm_gpuobj *inst; | ||
| 33 | struct nvkm_gpuobj *pgd; | ||
| 34 | struct nvkm_vm *vm; | ||
| 35 | }; | ||
| 36 | #define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base) | ||
| 37 | |||
| 38 | int gm200_secboot_oneinit(struct nvkm_secboot *); | ||
| 39 | int gm200_secboot_fini(struct nvkm_secboot *, bool); | ||
| 40 | void *gm200_secboot_dtor(struct nvkm_secboot *); | ||
| 41 | int gm200_secboot_run_blob(struct nvkm_secboot *, struct nvkm_gpuobj *); | ||
| 42 | |||
| 43 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c index d5395ebfe8d3..6707b8edc086 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c | |||
| @@ -20,103 +20,8 @@ | |||
| 20 | * DEALINGS IN THE SOFTWARE. | 20 | * DEALINGS IN THE SOFTWARE. |
| 21 | */ | 21 | */ |
| 22 | 22 | ||
| 23 | #include "priv.h" | 23 | #include "acr.h" |
| 24 | 24 | #include "gm200.h" | |
| 25 | #include <core/gpuobj.h> | ||
| 26 | |||
| 27 | /* | ||
| 28 | * The BL header format used by GM20B's firmware is slightly different | ||
| 29 | * from the one of GM200. Fix the differences here. | ||
| 30 | */ | ||
| 31 | struct gm20b_flcn_bl_desc { | ||
| 32 | u32 reserved[4]; | ||
| 33 | u32 signature[4]; | ||
| 34 | u32 ctx_dma; | ||
| 35 | u32 code_dma_base; | ||
| 36 | u32 non_sec_code_off; | ||
| 37 | u32 non_sec_code_size; | ||
| 38 | u32 sec_code_off; | ||
| 39 | u32 sec_code_size; | ||
| 40 | u32 code_entry_point; | ||
| 41 | u32 data_dma_base; | ||
| 42 | u32 data_size; | ||
| 43 | }; | ||
| 44 | |||
| 45 | static int | ||
| 46 | gm20b_secboot_prepare_blobs(struct gm200_secboot *gsb) | ||
| 47 | { | ||
| 48 | struct nvkm_subdev *subdev = &gsb->base.subdev; | ||
| 49 | int acr_size; | ||
| 50 | int ret; | ||
| 51 | |||
| 52 | ret = gm20x_secboot_prepare_blobs(gsb); | ||
| 53 | if (ret) | ||
| 54 | return ret; | ||
| 55 | |||
| 56 | acr_size = gsb->acr_load_blob->size; | ||
| 57 | /* | ||
| 58 | * On Tegra the WPR region is set by the bootloader. It is illegal for | ||
| 59 | * the HS blob to be larger than this region. | ||
| 60 | */ | ||
| 61 | if (acr_size > gsb->wpr_size) { | ||
| 62 | nvkm_error(subdev, "WPR region too small for FW blob!\n"); | ||
| 63 | nvkm_error(subdev, "required: %dB\n", acr_size); | ||
| 64 | nvkm_error(subdev, "WPR size: %dB\n", gsb->wpr_size); | ||
| 65 | return -ENOSPC; | ||
| 66 | } | ||
| 67 | |||
| 68 | return 0; | ||
| 69 | } | ||
| 70 | |||
| 71 | /** | ||
| 72 | * gm20b_secboot_fixup_bl_desc - adapt BL descriptor to format used by GM20B FW | ||
| 73 | * | ||
| 74 | * There is only a slight format difference (DMA addresses being 32-bits and | ||
| 75 | * 256B-aligned) to address. | ||
| 76 | */ | ||
| 77 | static void | ||
| 78 | gm20b_secboot_fixup_bl_desc(const struct gm200_flcn_bl_desc *desc, void *ret) | ||
| 79 | { | ||
| 80 | struct gm20b_flcn_bl_desc *gdesc = ret; | ||
| 81 | u64 addr; | ||
| 82 | |||
| 83 | memcpy(gdesc->reserved, desc->reserved, sizeof(gdesc->reserved)); | ||
| 84 | memcpy(gdesc->signature, desc->signature, sizeof(gdesc->signature)); | ||
| 85 | gdesc->ctx_dma = desc->ctx_dma; | ||
| 86 | addr = desc->code_dma_base.hi; | ||
| 87 | addr <<= 32; | ||
| 88 | addr |= desc->code_dma_base.lo; | ||
| 89 | gdesc->code_dma_base = lower_32_bits(addr >> 8); | ||
| 90 | gdesc->non_sec_code_off = desc->non_sec_code_off; | ||
| 91 | gdesc->non_sec_code_size = desc->non_sec_code_size; | ||
| 92 | gdesc->sec_code_off = desc->sec_code_off; | ||
| 93 | gdesc->sec_code_size = desc->sec_code_size; | ||
| 94 | gdesc->code_entry_point = desc->code_entry_point; | ||
| 95 | addr = desc->data_dma_base.hi; | ||
| 96 | addr <<= 32; | ||
| 97 | addr |= desc->data_dma_base.lo; | ||
| 98 | gdesc->data_dma_base = lower_32_bits(addr >> 8); | ||
| 99 | gdesc->data_size = desc->data_size; | ||
| 100 | } | ||
| 101 | |||
| 102 | static void | ||
| 103 | gm20b_secboot_fixup_hs_desc(struct gm200_secboot *gsb, | ||
| 104 | struct hsflcn_acr_desc *desc) | ||
| 105 | { | ||
| 106 | desc->ucode_blob_base = gsb->ls_blob->addr; | ||
| 107 | desc->ucode_blob_size = gsb->ls_blob->size; | ||
| 108 | |||
| 109 | desc->wpr_offset = 0; | ||
| 110 | } | ||
| 111 | |||
| 112 | static const struct gm200_secboot_func | ||
| 113 | gm20b_secboot_func = { | ||
| 114 | .bl_desc_size = sizeof(struct gm20b_flcn_bl_desc), | ||
| 115 | .fixup_bl_desc = gm20b_secboot_fixup_bl_desc, | ||
| 116 | .fixup_hs_desc = gm20b_secboot_fixup_hs_desc, | ||
| 117 | .prepare_blobs = gm20b_secboot_prepare_blobs, | ||
| 118 | }; | ||
| 119 | |||
| 120 | 25 | ||
| 121 | #ifdef CONFIG_ARCH_TEGRA | 26 | #ifdef CONFIG_ARCH_TEGRA |
| 122 | #define TEGRA_MC_BASE 0x70019000 | 27 | #define TEGRA_MC_BASE 0x70019000 |
| @@ -144,15 +49,15 @@ gm20b_tegra_read_wpr(struct gm200_secboot *gsb) | |||
| 144 | nvkm_error(&sb->subdev, "Cannot map Tegra MC registers\n"); | 49 | nvkm_error(&sb->subdev, "Cannot map Tegra MC registers\n"); |
| 145 | return PTR_ERR(mc); | 50 | return PTR_ERR(mc); |
| 146 | } | 51 | } |
| 147 | gsb->wpr_addr = ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_0) | | 52 | sb->wpr_addr = ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_0) | |
| 148 | ((u64)ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_HI_0) << 32); | 53 | ((u64)ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_HI_0) << 32); |
| 149 | gsb->wpr_size = ioread32_native(mc + MC_SECURITY_CARVEOUT2_SIZE_128K) | 54 | sb->wpr_size = ioread32_native(mc + MC_SECURITY_CARVEOUT2_SIZE_128K) |
| 150 | << 17; | 55 | << 17; |
| 151 | cfg = ioread32_native(mc + MC_SECURITY_CARVEOUT2_CFG0); | 56 | cfg = ioread32_native(mc + MC_SECURITY_CARVEOUT2_CFG0); |
| 152 | iounmap(mc); | 57 | iounmap(mc); |
| 153 | 58 | ||
| 154 | /* Check that WPR settings are valid */ | 59 | /* Check that WPR settings are valid */ |
| 155 | if (gsb->wpr_size == 0) { | 60 | if (sb->wpr_size == 0) { |
| 156 | nvkm_error(&sb->subdev, "WPR region is empty\n"); | 61 | nvkm_error(&sb->subdev, "WPR region is empty\n"); |
| 157 | return -EINVAL; | 62 | return -EINVAL; |
| 158 | } | 63 | } |
| @@ -174,7 +79,7 @@ gm20b_tegra_read_wpr(struct gm200_secboot *gsb) | |||
| 174 | #endif | 79 | #endif |
| 175 | 80 | ||
| 176 | static int | 81 | static int |
| 177 | gm20b_secboot_init(struct nvkm_secboot *sb) | 82 | gm20b_secboot_oneinit(struct nvkm_secboot *sb) |
| 178 | { | 83 | { |
| 179 | struct gm200_secboot *gsb = gm200_secboot(sb); | 84 | struct gm200_secboot *gsb = gm200_secboot(sb); |
| 180 | int ret; | 85 | int ret; |
| @@ -183,17 +88,15 @@ gm20b_secboot_init(struct nvkm_secboot *sb) | |||
| 183 | if (ret) | 88 | if (ret) |
| 184 | return ret; | 89 | return ret; |
| 185 | 90 | ||
| 186 | return gm200_secboot_init(sb); | 91 | return gm200_secboot_oneinit(sb); |
| 187 | } | 92 | } |
| 188 | 93 | ||
| 189 | static const struct nvkm_secboot_func | 94 | static const struct nvkm_secboot_func |
| 190 | gm20b_secboot = { | 95 | gm20b_secboot = { |
| 191 | .dtor = gm200_secboot_dtor, | 96 | .dtor = gm200_secboot_dtor, |
| 192 | .init = gm20b_secboot_init, | 97 | .oneinit = gm20b_secboot_oneinit, |
| 193 | .reset = gm200_secboot_reset, | 98 | .fini = gm200_secboot_fini, |
| 194 | .start = gm200_secboot_start, | 99 | .run_blob = gm200_secboot_run_blob, |
| 195 | .managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS), | ||
| 196 | .boot_falcon = NVKM_SECBOOT_FALCON_PMU, | ||
| 197 | }; | 100 | }; |
| 198 | 101 | ||
| 199 | int | 102 | int |
| @@ -202,6 +105,11 @@ gm20b_secboot_new(struct nvkm_device *device, int index, | |||
| 202 | { | 105 | { |
| 203 | int ret; | 106 | int ret; |
| 204 | struct gm200_secboot *gsb; | 107 | struct gm200_secboot *gsb; |
| 108 | struct nvkm_acr *acr; | ||
| 109 | |||
| 110 | acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS)); | ||
| 111 | if (IS_ERR(acr)) | ||
| 112 | return PTR_ERR(acr); | ||
| 205 | 113 | ||
| 206 | gsb = kzalloc(sizeof(*gsb), GFP_KERNEL); | 114 | gsb = kzalloc(sizeof(*gsb), GFP_KERNEL); |
| 207 | if (!gsb) { | 115 | if (!gsb) { |
| @@ -210,12 +118,10 @@ gm20b_secboot_new(struct nvkm_device *device, int index, | |||
| 210 | } | 118 | } |
| 211 | *psb = &gsb->base; | 119 | *psb = &gsb->base; |
| 212 | 120 | ||
| 213 | ret = nvkm_secboot_ctor(&gm20b_secboot, device, index, &gsb->base); | 121 | ret = nvkm_secboot_ctor(&gm20b_secboot, acr, device, index, &gsb->base); |
| 214 | if (ret) | 122 | if (ret) |
| 215 | return ret; | 123 | return ret; |
| 216 | 124 | ||
| 217 | gsb->func = &gm20b_secboot_func; | ||
| 218 | |||
| 219 | return 0; | 125 | return 0; |
| 220 | } | 126 | } |
| 221 | 127 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h new file mode 100644 index 000000000000..00886cee57eb --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h | |||
| @@ -0,0 +1,151 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 20 | * DEALINGS IN THE SOFTWARE. | ||
| 21 | */ | ||
| 22 | |||
| 23 | #ifndef __NVKM_SECBOOT_LS_UCODE_H__ | ||
| 24 | #define __NVKM_SECBOOT_LS_UCODE_H__ | ||
| 25 | |||
| 26 | #include <core/os.h> | ||
| 27 | #include <core/subdev.h> | ||
| 28 | #include <subdev/secboot.h> | ||
| 29 | |||
| 30 | |||
| 31 | /** | ||
| 32 | * struct ls_ucode_img_desc - descriptor of firmware image | ||
| 33 | * @descriptor_size: size of this descriptor | ||
| 34 | * @image_size: size of the whole image | ||
| 35 | * @bootloader_start_offset: start offset of the bootloader in ucode image | ||
| 36 | * @bootloader_size: size of the bootloader | ||
| 37 | * @bootloader_imem_offset: start offset of the bootloader in IMEM | ||
| 38 | * @bootloader_entry_point: entry point of the bootloader in IMEM | ||
| 39 | * @app_start_offset: start offset of the LS firmware | ||
| 40 | * @app_size: size of the LS firmware's code and data | ||
| 41 | * @app_imem_offset: offset of the app in IMEM | ||
| 42 | * @app_imem_entry: entry point of the app in IMEM | ||
| 43 | * @app_dmem_offset: offset of the data in DMEM | ||
| 44 | * @app_resident_code_offset: offset of app code from app_start_offset | ||
| 45 | * @app_resident_code_size: size of the code | ||
| 46 | * @app_resident_data_offset: offset of data from app_start_offset | ||
| 47 | * @app_resident_data_size: size of data | ||
| 48 | * | ||
| 49 | * A firmware image contains the code, data, and bootloader of a given LS | ||
| 50 | * falcon in a single blob. This structure describes where everything is. | ||
| 51 | * | ||
| 52 | * This can be generated from a (bootloader, code, data) set if they have | ||
| 53 | * been loaded separately, or come directly from a file. | ||
| 54 | */ | ||
| 55 | struct ls_ucode_img_desc { | ||
| 56 | u32 descriptor_size; | ||
| 57 | u32 image_size; | ||
| 58 | u32 tools_version; | ||
| 59 | u32 app_version; | ||
| 60 | char date[64]; | ||
| 61 | u32 bootloader_start_offset; | ||
| 62 | u32 bootloader_size; | ||
| 63 | u32 bootloader_imem_offset; | ||
| 64 | u32 bootloader_entry_point; | ||
| 65 | u32 app_start_offset; | ||
| 66 | u32 app_size; | ||
| 67 | u32 app_imem_offset; | ||
| 68 | u32 app_imem_entry; | ||
| 69 | u32 app_dmem_offset; | ||
| 70 | u32 app_resident_code_offset; | ||
| 71 | u32 app_resident_code_size; | ||
| 72 | u32 app_resident_data_offset; | ||
| 73 | u32 app_resident_data_size; | ||
| 74 | u32 nb_overlays; | ||
| 75 | struct {u32 start; u32 size; } load_ovl[64]; | ||
| 76 | u32 compressed; | ||
| 77 | }; | ||
| 78 | |||
| 79 | /** | ||
| 80 | * struct ls_ucode_img - temporary storage for loaded LS firmwares | ||
| 81 | * @node: to link within lsf_ucode_mgr | ||
| 82 | * @falcon_id: ID of the falcon this LS firmware is for | ||
| 83 | * @ucode_desc: loaded or generated map of ucode_data | ||
| 84 | * @ucode_data: firmware payload (code and data) | ||
| 85 | * @ucode_size: size in bytes of data in ucode_data | ||
| 86 | * @sig: signature for this firmware | ||
| 87 | * @sig_size: size of the signature in bytes | ||
| 88 | * | ||
| 89 | * Preparing the WPR LS blob requires information about all the LS firmwares | ||
| 90 | * (size, etc) to be known. This structure contains all the data of one LS | ||
| 91 | * firmware. | ||
| 92 | */ | ||
| 93 | struct ls_ucode_img { | ||
| 94 | struct list_head node; | ||
| 95 | enum nvkm_secboot_falcon falcon_id; | ||
| 96 | |||
| 97 | struct ls_ucode_img_desc ucode_desc; | ||
| 98 | u8 *ucode_data; | ||
| 99 | u32 ucode_size; | ||
| 100 | |||
| 101 | u8 *sig; | ||
| 102 | u32 sig_size; | ||
| 103 | }; | ||
| 104 | |||
| 105 | /** | ||
| 106 | * struct fw_bin_header - header of firmware files | ||
| 107 | * @bin_magic: always 0x3b1d14f0 | ||
| 108 | * @bin_ver: version of the bin format | ||
| 109 | * @bin_size: entire image size including this header | ||
| 110 | * @header_offset: offset of the firmware/bootloader header in the file | ||
| 111 | * @data_offset: offset of the firmware/bootloader payload in the file | ||
| 112 | * @data_size: size of the payload | ||
| 113 | * | ||
| 114 | * This header is located at the beginning of the HS firmware and HS bootloader | ||
| 115 | * files, to describe where the headers and data can be found. | ||
| 116 | */ | ||
| 117 | struct fw_bin_header { | ||
| 118 | u32 bin_magic; | ||
| 119 | u32 bin_ver; | ||
| 120 | u32 bin_size; | ||
| 121 | u32 header_offset; | ||
| 122 | u32 data_offset; | ||
| 123 | u32 data_size; | ||
| 124 | }; | ||
| 125 | |||
| 126 | /** | ||
| 127 | * struct fw_bl_desc - firmware bootloader descriptor | ||
| 128 | * @start_tag: starting tag of bootloader | ||
| 129 | * @desc_dmem_load_off: DMEM offset of flcn_bl_dmem_desc | ||
| 130 | * @code_off: offset of code section | ||
| 131 | * @code_size: size of code section | ||
| 132 | * @data_off: offset of data section | ||
| 133 | * @data_size: size of data section | ||
| 134 | * | ||
| 135 | * This structure is embedded in bootloader firmware files to describe the | ||
| 136 | * IMEM and DMEM layout expected by the bootloader. | ||
| 137 | */ | ||
| 138 | struct fw_bl_desc { | ||
| 139 | u32 start_tag; | ||
| 140 | u32 dmem_load_off; | ||
| 141 | u32 code_off; | ||
| 142 | u32 code_size; | ||
| 143 | u32 data_off; | ||
| 144 | u32 data_size; | ||
| 145 | }; | ||
| 146 | |||
| 147 | int acr_ls_ucode_load_fecs(const struct nvkm_subdev *, struct ls_ucode_img *); | ||
| 148 | int acr_ls_ucode_load_gpccs(const struct nvkm_subdev *, struct ls_ucode_img *); | ||
| 149 | |||
| 150 | |||
| 151 | #endif | ||
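Putting fw_bin_header and fw_bl_desc together, locating the pieces of a loaded bootloader file looks roughly like this (fields as declared above; the firmware handle would come from nvkm_firmware_get()):

    static void fw_layout_sketch(const struct firmware *fw)
    {
            const struct fw_bin_header *hdr = (const void *)fw->data;
            const struct fw_bl_desc *bl = (const void *)fw->data +
                                          hdr->header_offset;
            const void *payload = fw->data + hdr->data_offset;
            const void *code = payload + bl->code_off;  /* bl->code_size bytes */
            const void *data = payload + bl->data_off;  /* bl->data_size bytes */
            /* code/data would be copied into the falcon from here */
    }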
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c new file mode 100644 index 000000000000..40a6df77bb8a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c | |||
| @@ -0,0 +1,158 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 20 | * DEALINGS IN THE SOFTWARE. | ||
| 21 | */ | ||
| 22 | |||
| 23 | |||
| 24 | #include "ls_ucode.h" | ||
| 25 | #include "acr.h" | ||
| 26 | |||
| 27 | #include <core/firmware.h> | ||
| 28 | |||
| 29 | #define BL_DESC_BLK_SIZE 256 | ||
| 30 | /** | ||
| 31 | * ls_ucode_img_build() - build a ucode image from bootloader, code and data | ||
| 32 | * | ||
| 33 | * @bl: bootloader image, including 16-byte descriptor | ||
| 34 | * @code: LS firmware code segment | ||
| 35 | * @data: LS firmware data segment | ||
| 36 | * @desc: ucode descriptor to be written | ||
| 37 | * | ||
| 38 | * Return: allocated ucode image with corresponding descriptor information. @desc | ||
| 39 | * is also updated to contain the right offsets within the returned image. | ||
| 40 | */ | ||
| 41 | static void * | ||
| 42 | ls_ucode_img_build(const struct firmware *bl, const struct firmware *code, | ||
| 43 | const struct firmware *data, struct ls_ucode_img_desc *desc) | ||
| 44 | { | ||
| 45 | struct fw_bin_header *bin_hdr = (void *)bl->data; | ||
| 46 | struct fw_bl_desc *bl_desc = (void *)bl->data + bin_hdr->header_offset; | ||
| 47 | void *bl_data = (void *)bl->data + bin_hdr->data_offset; | ||
| 48 | u32 pos = 0; | ||
| 49 | void *image; | ||
| 50 | |||
| 51 | desc->bootloader_start_offset = pos; | ||
| 52 | desc->bootloader_size = ALIGN(bl_desc->code_size, sizeof(u32)); | ||
| 53 | desc->bootloader_imem_offset = bl_desc->start_tag * 256; | ||
| 54 | desc->bootloader_entry_point = bl_desc->start_tag * 256; | ||
| 55 | |||
| 56 | pos = ALIGN(pos + desc->bootloader_size, BL_DESC_BLK_SIZE); | ||
| 57 | desc->app_start_offset = pos; | ||
| 58 | desc->app_size = ALIGN(code->size, BL_DESC_BLK_SIZE) + | ||
| 59 | ALIGN(data->size, BL_DESC_BLK_SIZE); | ||
| 60 | desc->app_imem_offset = 0; | ||
| 61 | desc->app_imem_entry = 0; | ||
| 62 | desc->app_dmem_offset = 0; | ||
| 63 | desc->app_resident_code_offset = 0; | ||
| 64 | desc->app_resident_code_size = ALIGN(code->size, BL_DESC_BLK_SIZE); | ||
| 65 | |||
| 66 | pos = ALIGN(pos + desc->app_resident_code_size, BL_DESC_BLK_SIZE); | ||
| 67 | desc->app_resident_data_offset = pos - desc->app_start_offset; | ||
| 68 | desc->app_resident_data_size = ALIGN(data->size, BL_DESC_BLK_SIZE); | ||
| 69 | |||
| 70 | desc->image_size = ALIGN(bl_desc->code_size, BL_DESC_BLK_SIZE) + | ||
| 71 | desc->app_size; | ||
| 72 | |||
| 73 | image = kzalloc(desc->image_size, GFP_KERNEL); | ||
| 74 | if (!image) | ||
| 75 | return ERR_PTR(-ENOMEM); | ||
| 76 | |||
| 77 | memcpy(image + desc->bootloader_start_offset, bl_data, | ||
| 78 | bl_desc->code_size); | ||
| 79 | memcpy(image + desc->app_start_offset, code->data, code->size); | ||
| 80 | memcpy(image + desc->app_start_offset + desc->app_resident_data_offset, | ||
| 81 | data->data, data->size); | ||
| 82 | |||
| 83 | return image; | ||
| 84 | } | ||
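To make the offset arithmetic in ls_ucode_img_build() concrete, assume (purely for illustration) a 0x180-byte bootloader, a 0x4100-byte code segment and a 0x800-byte data segment. With BL_DESC_BLK_SIZE = 256 (0x100), the resulting descriptor fields are:

```
bootloader_size          = ALIGN(0x180, 4)                       = 0x180
app_start_offset         = ALIGN(0x180, 0x100)                   = 0x200
app_resident_code_size   = ALIGN(0x4100, 0x100)                  = 0x4100
app_resident_data_offset = ALIGN(0x200 + 0x4100, 0x100) - 0x200  = 0x4100
app_resident_data_size   = ALIGN(0x800, 0x100)                   = 0x800
app_size                 = 0x4100 + 0x800                        = 0x4900
image_size               = ALIGN(0x180, 0x100) + 0x4900          = 0x4b00
```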
| 85 | |||
| 86 | /** | ||
| 87 | * ls_ucode_img_load_gr() - load and prepare a LS GR ucode image | ||
| 88 | * | ||
| 89 | * Load the LS microcode, bootloader and signature and pack them into a single | ||
| 90 | * blob. Also generate the corresponding ucode descriptor. | ||
| 91 | */ | ||
| 92 | static int | ||
| 93 | ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, struct ls_ucode_img *img, | ||
| 94 | const char *falcon_name) | ||
| 95 | { | ||
| 96 | const struct firmware *bl, *code, *data, *sig; | ||
| 97 | char f[64]; | ||
| 98 | int ret; | ||
| 99 | |||
| 100 | snprintf(f, sizeof(f), "gr/%s_bl", falcon_name); | ||
| 101 | ret = nvkm_firmware_get(subdev->device, f, &bl); | ||
| 102 | if (ret) | ||
| 103 | goto error; | ||
| 104 | |||
| 105 | snprintf(f, sizeof(f), "gr/%s_inst", falcon_name); | ||
| 106 | ret = nvkm_firmware_get(subdev->device, f, &code); | ||
| 107 | if (ret) | ||
| 108 | goto free_bl; | ||
| 109 | |||
| 110 | snprintf(f, sizeof(f), "gr/%s_data", falcon_name); | ||
| 111 | ret = nvkm_firmware_get(subdev->device, f, &data); | ||
| 112 | if (ret) | ||
| 113 | goto free_inst; | ||
| 114 | |||
| 115 | snprintf(f, sizeof(f), "gr/%s_sig", falcon_name); | ||
| 116 | ret = nvkm_firmware_get(subdev->device, f, &sig); | ||
| 117 | if (ret) | ||
| 118 | goto free_data; | ||
| 119 | img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL); | ||
| 120 | if (!img->sig) { | ||
| 121 | ret = -ENOMEM; | ||
| 122 | goto free_sig; | ||
| 123 | } | ||
| 124 | img->sig_size = sig->size; | ||
| 125 | |||
| 126 | img->ucode_data = ls_ucode_img_build(bl, code, data, | ||
| 127 | &img->ucode_desc); | ||
| 128 | if (IS_ERR(img->ucode_data)) { | ||
| 129 | ret = PTR_ERR(img->ucode_data); | ||
| 130 | kfree(img->sig); | ||
| 130 | goto free_sig; | ||
| 131 | } | ||
| 132 | img->ucode_size = img->ucode_desc.image_size; | ||
| 133 | |||
| 134 | free_sig: | ||
| 135 | nvkm_firmware_put(sig); | ||
| 136 | free_data: | ||
| 137 | nvkm_firmware_put(data); | ||
| 138 | free_inst: | ||
| 139 | nvkm_firmware_put(code); | ||
| 140 | free_bl: | ||
| 141 | nvkm_firmware_put(bl); | ||
| 142 | error: | ||
| 143 | return ret; | ||
| 144 | } | ||
| 145 | |||
| 146 | int | ||
| 147 | acr_ls_ucode_load_fecs(const struct nvkm_subdev *subdev, | ||
| 148 | struct ls_ucode_img *img) | ||
| 149 | { | ||
| 150 | return ls_ucode_img_load_gr(subdev, img, "fecs"); | ||
| 151 | } | ||
| 152 | |||
| 153 | int | ||
| 154 | acr_ls_ucode_load_gpccs(const struct nvkm_subdev *subdev, | ||
| 155 | struct ls_ucode_img *img) | ||
| 156 | { | ||
| 157 | return ls_ucode_img_load_gr(subdev, img, "gpccs"); | ||
| 158 | } | ||
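A hedged sketch of how a caller could drive these entry points to assemble one WPR entry; the allocation, falcon_id assignment and the `imgs` list head are illustrative, the real ACR code structures this differently:

```c
/* Illustrative caller: load the FECS LS firmware into a fresh image. */
struct ls_ucode_img *img = kzalloc(sizeof(*img), GFP_KERNEL);
int ret;

if (!img)
	return -ENOMEM;

img->falcon_id = NVKM_SECBOOT_FALCON_FECS;
ret = acr_ls_ucode_load_fecs(subdev, img);
if (ret) {
	kfree(img);
	return ret;
}

/* img->ucode_data and img->sig are now ready to be packed into the blob */
list_add_tail(&img->node, &imgs);
```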
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h index a9a8a0e1017e..936a65f5658c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h | |||
| @@ -27,20 +27,16 @@ | |||
| 27 | #include <subdev/mmu.h> | 27 | #include <subdev/mmu.h> |
| 28 | 28 | ||
| 29 | struct nvkm_secboot_func { | 29 | struct nvkm_secboot_func { |
| 30 | int (*init)(struct nvkm_secboot *); | 30 | int (*oneinit)(struct nvkm_secboot *); |
| 31 | int (*fini)(struct nvkm_secboot *, bool suspend); | 31 | int (*fini)(struct nvkm_secboot *, bool suspend); |
| 32 | void *(*dtor)(struct nvkm_secboot *); | 32 | void *(*dtor)(struct nvkm_secboot *); |
| 33 | int (*reset)(struct nvkm_secboot *, enum nvkm_secboot_falcon); | 33 | int (*run_blob)(struct nvkm_secboot *, struct nvkm_gpuobj *); |
| 34 | int (*start)(struct nvkm_secboot *, enum nvkm_secboot_falcon); | ||
| 35 | |||
| 36 | /* ID of the falcon that will perform secure boot */ | ||
| 37 | enum nvkm_secboot_falcon boot_falcon; | ||
| 38 | /* Bit-mask of IDs of managed falcons */ | ||
| 39 | unsigned long managed_falcons; | ||
| 40 | }; | 34 | }; |
| 41 | 35 | ||
| 42 | int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_device *, | 36 | extern const char *nvkm_secboot_falcon_name[]; |
| 43 | int index, struct nvkm_secboot *); | 37 | |
| 38 | int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_acr *, | ||
| 39 | struct nvkm_device *, int, struct nvkm_secboot *); | ||
| 44 | int nvkm_secboot_falcon_reset(struct nvkm_secboot *); | 40 | int nvkm_secboot_falcon_reset(struct nvkm_secboot *); |
| 45 | int nvkm_secboot_falcon_run(struct nvkm_secboot *); | 41 | int nvkm_secboot_falcon_run(struct nvkm_secboot *); |
| 46 | 42 | ||
| @@ -48,187 +44,20 @@ struct flcn_u64 { | |||
| 48 | u32 lo; | 44 | u32 lo; |
| 49 | u32 hi; | 45 | u32 hi; |
| 50 | }; | 46 | }; |
| 47 | |||
| 51 | static inline u64 flcn64_to_u64(const struct flcn_u64 f) | 48 | static inline u64 flcn64_to_u64(const struct flcn_u64 f) |
| 52 | { | 49 | { |
| 53 | return ((u64)f.hi) << 32 | f.lo; | 50 | return ((u64)f.hi) << 32 | f.lo; |
| 54 | } | 51 | } |
| 55 | 52 | ||
| 56 | /** | 53 | static inline struct flcn_u64 u64_to_flcn64(u64 u) |
| 57 | * struct gm200_flcn_bl_desc - DMEM bootloader descriptor | 54 | { |
| 58 | * @signature: 16B signature for secure code. 0s if no secure code | 55 | struct flcn_u64 ret; |
| 59 | * @ctx_dma: DMA context to be used by BL while loading code/data | ||
| 60 | * @code_dma_base: 256B-aligned Physical FB Address where code is located | ||
| 61 | * (falcon's $xcbase register) | ||
| 62 | * @non_sec_code_off: offset from code_dma_base where the non-secure code is | ||
| 63 | * located. The offset must be multiple of 256 to help perf | ||
| 64 | * @non_sec_code_size: the size of the nonSecure code part. | ||
| 65 | * @sec_code_off: offset from code_dma_base where the secure code is | ||
| 66 | * located. The offset must be multiple of 256 to help perf | ||
| 67 | * @sec_code_size: offset from code_dma_base where the secure code is | ||
| 68 | * located. The offset must be multiple of 256 to help perf | ||
| 69 | * @code_entry_point: code entry point which will be invoked by BL after | ||
| 70 | * code is loaded. | ||
| 71 | * @data_dma_base: 256B aligned Physical FB Address where data is located. | ||
| 72 | * (falcon's $xdbase register) | ||
| 73 | * @data_size: size of data block. Should be multiple of 256B | ||
| 74 | * | ||
| 75 | * Structure used by the bootloader to load the rest of the code. This has | ||
| 76 | * to be filled by host and copied into DMEM at offset provided in the | ||
| 77 | * hsflcn_bl_desc.bl_desc_dmem_load_off. | ||
| 78 | */ | ||
| 79 | struct gm200_flcn_bl_desc { | ||
| 80 | u32 reserved[4]; | ||
| 81 | u32 signature[4]; | ||
| 82 | u32 ctx_dma; | ||
| 83 | struct flcn_u64 code_dma_base; | ||
| 84 | u32 non_sec_code_off; | ||
| 85 | u32 non_sec_code_size; | ||
| 86 | u32 sec_code_off; | ||
| 87 | u32 sec_code_size; | ||
| 88 | u32 code_entry_point; | ||
| 89 | struct flcn_u64 data_dma_base; | ||
| 90 | u32 data_size; | ||
| 91 | }; | ||
| 92 | |||
| 93 | /** | ||
| 94 | * struct hsflcn_acr_desc - data section of the HS firmware | ||
| 95 | * | ||
| 96 | * This header is to be copied at the beginning of DMEM by the HS bootloader. | ||
| 97 | * | ||
| 98 | * @signature: signature of ACR ucode | ||
| 99 | * @wpr_region_id: region ID holding the WPR header and its details | ||
| 100 | * @wpr_offset: offset from the WPR region holding the wpr header | ||
| 101 | * @regions: region descriptors | ||
| 102 | * @nonwpr_ucode_blob_size: size of LS blob | ||
| 103 | * @nonwpr_ucode_blob_start: FB location of LS blob is | ||
| 104 | */ | ||
| 105 | struct hsflcn_acr_desc { | ||
| 106 | union { | ||
| 107 | u8 reserved_dmem[0x200]; | ||
| 108 | u32 signatures[4]; | ||
| 109 | } ucode_reserved_space; | ||
| 110 | u32 wpr_region_id; | ||
| 111 | u32 wpr_offset; | ||
| 112 | u32 mmu_mem_range; | ||
| 113 | #define FLCN_ACR_MAX_REGIONS 2 | ||
| 114 | struct { | ||
| 115 | u32 no_regions; | ||
| 116 | struct { | ||
| 117 | u32 start_addr; | ||
| 118 | u32 end_addr; | ||
| 119 | u32 region_id; | ||
| 120 | u32 read_mask; | ||
| 121 | u32 write_mask; | ||
| 122 | u32 client_mask; | ||
| 123 | } region_props[FLCN_ACR_MAX_REGIONS]; | ||
| 124 | } regions; | ||
| 125 | u32 ucode_blob_size; | ||
| 126 | u64 ucode_blob_base __aligned(8); | ||
| 127 | struct { | ||
| 128 | u32 vpr_enabled; | ||
| 129 | u32 vpr_start; | ||
| 130 | u32 vpr_end; | ||
| 131 | u32 hdcp_policies; | ||
| 132 | } vpr_desc; | ||
| 133 | }; | ||
| 134 | |||
| 135 | /** | ||
| 136 | * Contains the whole secure boot state, allowing it to be performed as needed | ||
| 137 | * @wpr_addr: physical address of the WPR region | ||
| 138 | * @wpr_size: size in bytes of the WPR region | ||
| 139 | * @ls_blob: LS blob of all the LS firmwares, signatures, bootloaders | ||
| 140 | * @ls_blob_size: size of the LS blob | ||
| 141 | * @ls_blob_nb_regions: number of LS firmwares that will be loaded | ||
| 142 | * @acr_blob: HS blob | ||
| 143 | * @acr_blob_vma: mapping of the HS blob into the secure falcon's VM | ||
| 144 | * @acr_bl_desc: bootloader descriptor of the HS blob | ||
| 145 | * @hsbl_blob: HS blob bootloader | ||
| 146 | * @inst: instance block for HS falcon | ||
| 147 | * @pgd: page directory for the HS falcon | ||
| 148 | * @vm: address space used by the HS falcon | ||
| 149 | * @falcon_state: current state of the managed falcons | ||
| 150 | * @firmware_ok: whether the firmware blobs have been created | ||
| 151 | */ | ||
| 152 | struct gm200_secboot { | ||
| 153 | struct nvkm_secboot base; | ||
| 154 | const struct gm200_secboot_func *func; | ||
| 155 | |||
| 156 | /* | ||
| 157 | * Address and size of the WPR region. On dGPU this will be the | ||
| 158 | * address of the LS blob. On Tegra this is a fixed region set by the | ||
| 159 | * bootloader | ||
| 160 | */ | ||
| 161 | u64 wpr_addr; | ||
| 162 | u32 wpr_size; | ||
| 163 | |||
| 164 | /* | ||
| 165 | * HS FW - lock WPR region (dGPU only) and load LS FWs | ||
| 166 | * on Tegra the HS FW copies the LS blob into the fixed WPR instead | ||
| 167 | */ | ||
| 168 | struct nvkm_gpuobj *acr_load_blob; | ||
| 169 | struct gm200_flcn_bl_desc acr_load_bl_desc; | ||
| 170 | |||
| 171 | /* HS FW - unlock WPR region (dGPU only) */ | ||
| 172 | struct nvkm_gpuobj *acr_unload_blob; | ||
| 173 | struct gm200_flcn_bl_desc acr_unload_bl_desc; | ||
| 174 | |||
| 175 | /* HS bootloader */ | ||
| 176 | void *hsbl_blob; | ||
| 177 | |||
| 178 | /* LS FWs, to be loaded by the HS ACR */ | ||
| 179 | struct nvkm_gpuobj *ls_blob; | ||
| 180 | |||
| 181 | /* Instance block & address space used for HS FW execution */ | ||
| 182 | struct nvkm_gpuobj *inst; | ||
| 183 | struct nvkm_gpuobj *pgd; | ||
| 184 | struct nvkm_vm *vm; | ||
| 185 | |||
| 186 | /* To keep track of the state of all managed falcons */ | ||
| 187 | enum { | ||
| 188 | /* In non-secure state, no firmware loaded, no privileges*/ | ||
| 189 | NON_SECURE = 0, | ||
| 190 | /* In low-secure mode and ready to be started */ | ||
| 191 | RESET, | ||
| 192 | /* In low-secure mode and running */ | ||
| 193 | RUNNING, | ||
| 194 | } falcon_state[NVKM_SECBOOT_FALCON_END]; | ||
| 195 | |||
| 196 | bool firmware_ok; | ||
| 197 | }; | ||
| 198 | #define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base) | ||
| 199 | |||
| 200 | /** | ||
| 201 | * Contains functions we wish to abstract between GM200-like implementations | ||
| 202 | * @bl_desc_size: size of the BL descriptor used by this chip. | ||
| 203 | * @fixup_bl_desc: hook that generates the proper BL descriptor format from | ||
| 204 | * the generic GM200 format into a data array of size | ||
| 205 | * bl_desc_size | ||
| 206 | * @fixup_hs_desc: hook that twiddles the HS descriptor before it is used | ||
| 207 | * @prepare_blobs: prepares the various blobs needed for secure booting | ||
| 208 | */ | ||
| 209 | struct gm200_secboot_func { | ||
| 210 | /* | ||
| 211 | * Size of the bootloader descriptor for this chip. A block of this | ||
| 212 | * size is allocated before booting a falcon and the fixup_bl_desc | ||
| 213 | * callback is called on it | ||
| 214 | */ | ||
| 215 | u32 bl_desc_size; | ||
| 216 | void (*fixup_bl_desc)(const struct gm200_flcn_bl_desc *, void *); | ||
| 217 | |||
| 218 | /* | ||
| 219 | * Chip-specific modifications of the HS descriptor can be done here. | ||
| 220 | * On dGPU this is used to fill the information about the WPR region | ||
| 221 | * we want the HS FW to set up. | ||
| 222 | */ | ||
| 223 | void (*fixup_hs_desc)(struct gm200_secboot *, struct hsflcn_acr_desc *); | ||
| 224 | int (*prepare_blobs)(struct gm200_secboot *); | ||
| 225 | }; | ||
| 226 | 56 | ||
| 227 | int gm200_secboot_init(struct nvkm_secboot *); | 57 | ret.hi = upper_32_bits(u); |
| 228 | void *gm200_secboot_dtor(struct nvkm_secboot *); | 58 | ret.lo = lower_32_bits(u); |
| 229 | int gm200_secboot_reset(struct nvkm_secboot *, u32); | ||
| 230 | int gm200_secboot_start(struct nvkm_secboot *, u32); | ||
| 231 | 59 | ||
| 232 | int gm20x_secboot_prepare_blobs(struct gm200_secboot *); | 60 | return ret; |
| 61 | } | ||
| 233 | 62 | ||
| 234 | #endif | 63 | #endif |
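The new u64_to_flcn64() is the inverse of the existing flcn64_to_u64(), built on the kernel's upper_32_bits()/lower_32_bits() helpers. A quick illustration of the round trip (the address value is arbitrary):

```c
u64 addr = 0x00000001234f5600ULL;		/* arbitrary GPU address */
struct flcn_u64 f = u64_to_flcn64(addr);	/* f.hi = 0x1, f.lo = 0x234f5600 */

WARN_ON(flcn64_to_u64(f) != addr);		/* round trip always holds */
```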
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c index 8894fee30cbc..df949fa7d05d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c | |||
| @@ -64,10 +64,9 @@ nvkm_therm_update_trip(struct nvkm_therm *therm) | |||
| 64 | } | 64 | } |
| 65 | 65 | ||
| 66 | static int | 66 | static int |
| 67 | nvkm_therm_update_linear(struct nvkm_therm *therm) | 67 | nvkm_therm_compute_linear_duty(struct nvkm_therm *therm, u8 linear_min_temp, |
| 68 | u8 linear_max_temp) | ||
| 68 | { | 69 | { |
| 69 | u8 linear_min_temp = therm->fan->bios.linear_min_temp; | ||
| 70 | u8 linear_max_temp = therm->fan->bios.linear_max_temp; | ||
| 71 | u8 temp = therm->func->temp_get(therm); | 70 | u8 temp = therm->func->temp_get(therm); |
| 72 | u16 duty; | 71 | u16 duty; |
| 73 | 72 | ||
| @@ -85,6 +84,21 @@ nvkm_therm_update_linear(struct nvkm_therm *therm) | |||
| 85 | return duty; | 84 | return duty; |
| 86 | } | 85 | } |
| 87 | 86 | ||
| 87 | static int | ||
| 88 | nvkm_therm_update_linear(struct nvkm_therm *therm) | ||
| 89 | { | ||
| 90 | u8 min = therm->fan->bios.linear_min_temp; | ||
| 91 | u8 max = therm->fan->bios.linear_max_temp; | ||
| 92 | return nvkm_therm_compute_linear_duty(therm, min, max); | ||
| 93 | } | ||
| 94 | |||
| 95 | static int | ||
| 96 | nvkm_therm_update_linear_fallback(struct nvkm_therm *therm) | ||
| 97 | { | ||
| 98 | u8 max = therm->bios_sensor.thrs_fan_boost.temp; | ||
| 99 | return nvkm_therm_compute_linear_duty(therm, 30, max); | ||
| 100 | } | ||
| 101 | |||
| 88 | static void | 102 | static void |
| 89 | nvkm_therm_update(struct nvkm_therm *therm, int mode) | 103 | nvkm_therm_update(struct nvkm_therm *therm, int mode) |
| 90 | { | 104 | { |
| @@ -119,6 +133,8 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode) | |||
| 119 | case NVBIOS_THERM_FAN_OTHER: | 133 | case NVBIOS_THERM_FAN_OTHER: |
| 120 | if (therm->cstate) | 134 | if (therm->cstate) |
| 121 | duty = therm->cstate; | 135 | duty = therm->cstate; |
| 136 | else | ||
| 137 | duty = nvkm_therm_update_linear_fallback(therm); | ||
| 122 | poll = false; | 138 | poll = false; |
| 123 | break; | 139 | break; |
| 124 | } | 140 | } |
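Both callers now share one interpolation: the duty cycle scales linearly with temperature between the two bounds, and the new fallback simply substitutes a hardcoded 30°C lower bound and the VBIOS fan-boost threshold as the upper bound. A self-contained sketch of that mapping (the duty bounds are illustrative; nvkm takes them from the fan's VBIOS data):

```c
/* Sketch: linearly map temp in [min_temp, max_temp] onto a duty range. */
static int linear_fan_duty(int temp, int min_temp, int max_temp,
			   int duty_min, int duty_max)
{
	if (temp <= min_temp)
		return duty_min;
	if (temp >= max_temp)
		return duty_max;
	return duty_min + (temp - min_temp) * (duty_max - duty_min) /
			  (max_temp - min_temp);
}
```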
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c index fe063d5728e2..67ada1d9a28c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c | |||
| @@ -95,6 +95,20 @@ nvkm_top_intr(struct nvkm_device *device, u32 intr, u64 *psubdevs) | |||
| 95 | return intr & ~handled; | 95 | return intr & ~handled; |
| 96 | } | 96 | } |
| 97 | 97 | ||
| 98 | int | ||
| 99 | nvkm_top_fault_id(struct nvkm_device *device, enum nvkm_devidx devidx) | ||
| 100 | { | ||
| 101 | struct nvkm_top *top = device->top; | ||
| 102 | struct nvkm_top_device *info; | ||
| 103 | |||
| 104 | list_for_each_entry(info, &top->device, head) { | ||
| 105 | if (info->index == devidx && info->fault >= 0) | ||
| 106 | return info->fault; | ||
| 107 | } | ||
| 108 | |||
| 109 | return -ENOENT; | ||
| 110 | } | ||
| 111 | |||
| 98 | enum nvkm_devidx | 112 | enum nvkm_devidx |
| 99 | nvkm_top_fault(struct nvkm_device *device, int fault) | 113 | nvkm_top_fault(struct nvkm_device *device, int fault) |
| 100 | { | 114 | { |
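nvkm_top_fault_id() is the reverse lookup of the existing nvkm_top_fault() shown below: instead of resolving a fault ID to a subdevice, it returns the fault ID assigned to a given subdevice, or -ENOENT when the topology does not list one. A hedged usage sketch (the engine chosen is arbitrary):

```c
/* Sketch: look up the fault ID the topology assigns to the GR engine. */
int fault = nvkm_top_fault_id(device, NVKM_ENGINE_GR);

if (fault < 0)
	return fault;	/* -ENOENT: no fault ID listed for this device */

/* 'fault' can now be programmed into a fault-routing register */
```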
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c index 5554b72cf56a..d956e6266368 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c | |||
| @@ -2506,6 +2506,25 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk, | |||
| 2506 | return -EINVAL; | 2506 | return -EINVAL; |
| 2507 | } | 2507 | } |
| 2508 | 2508 | ||
| 2509 | if (*decim_x > 4 && color_mode != OMAP_DSS_COLOR_NV12) { | ||
| 2510 | /* | ||
| 2511 | * Let's disable all scaling that requires horizontal | ||
| 2512 | * decimation with higher factor than 4, until we have | ||
| 2513 | * better estimates of what we can and can not | ||
| 2514 | * do. However, NV12 color format appears to work OK | ||
| 2515 | * with all decimation factors. | ||
| 2516 | * | ||
| 2517 | * When decimating horizontally by more than 4 the DSS | ||
| 2518 | * is not able to fetch the data in burst mode. When | ||
| 2519 | * this happens it is hard to tell if there is enough | ||
| 2520 | * bandwidth. Despite what theory says this appears to | ||
| 2521 | * be true also for 16-bit color formats. | ||
| 2522 | */ | ||
| 2523 | DSSERR("Not enough bandwidth, too much downscaling (x-decimation factor %d > 4)\n", *decim_x); | ||
| 2524 | |||
| 2525 | return -EINVAL; | ||
| 2526 | } | ||
| 2527 | |||
| 2509 | *core_clk = dispc.feat->calc_core_clk(pclk, in_width, in_height, | 2528 | *core_clk = dispc.feat->calc_core_clk(pclk, in_width, in_height, |
| 2510 | out_width, out_height, mem_to_mem); | 2529 | out_width, out_height, mem_to_mem); |
| 2511 | return 0; | 2530 | return 0; |
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index bd18e8c4f1d0..2fe735c269fc 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c | |||
| @@ -410,13 +410,7 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc, | |||
| 410 | dispc_mgr_set_gamma(omap_crtc->channel, lut, length); | 410 | dispc_mgr_set_gamma(omap_crtc->channel, lut, length); |
| 411 | } | 411 | } |
| 412 | 412 | ||
| 413 | /* | 413 | /* Only flush the CRTC if it is currently enabled. */ |
| 414 | * Only flush the CRTC if it is currently enabled. CRTCs that require a | ||
| 415 | * mode set are disabled prior plane updates and enabled afterwards. | ||
| 416 | * They are thus not active (regardless of what their CRTC core state | ||
| 417 | * reports) and the DRM core could thus call this function even though | ||
| 418 | * the CRTC is currently disabled. Do nothing in that case. | ||
| 419 | */ | ||
| 420 | if (!omap_crtc->enabled) | 414 | if (!omap_crtc->enabled) |
| 421 | return; | 415 | return; |
| 422 | 416 | ||
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 586ed630d458..79a4aad35e0f 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c | |||
| @@ -96,9 +96,22 @@ static void omap_atomic_complete(struct omap_atomic_state_commit *commit) | |||
| 96 | dispc_runtime_get(); | 96 | dispc_runtime_get(); |
| 97 | 97 | ||
| 98 | drm_atomic_helper_commit_modeset_disables(dev, old_state); | 98 | drm_atomic_helper_commit_modeset_disables(dev, old_state); |
| 99 | drm_atomic_helper_commit_planes(dev, old_state, | 99 | |
| 100 | DRM_PLANE_COMMIT_ACTIVE_ONLY); | 100 | /* With the current dss dispc implementation we have to enable |
| 101 | * the new modeset before we can commit planes. The dispc ovl | ||
| 103 | * configuration relies on the video mode configuration being | ||
| 103 | * written into the HW when the ovl configuration is | ||
| 104 | * calculated. | ||
| 105 | * | ||
| 106 | * This approach is not ideal because after a mode change the | ||
| 107 | * plane update is executed only after the first vblank | ||
| 108 | * interrupt. The dispc implementation should be fixed so that | ||
| 110 | * it is able to use uncommitted drm state information. | ||
| 110 | */ | ||
| 101 | drm_atomic_helper_commit_modeset_enables(dev, old_state); | 111 | drm_atomic_helper_commit_modeset_enables(dev, old_state); |
| 112 | omap_atomic_wait_for_completion(dev, old_state); | ||
| 113 | |||
| 114 | drm_atomic_helper_commit_planes(dev, old_state, 0); | ||
| 102 | 115 | ||
| 103 | omap_atomic_wait_for_completion(dev, old_state); | 116 | omap_atomic_wait_for_completion(dev, old_state); |
| 104 | 117 | ||
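The resulting order, disables, then enables, then a vblank wait, then the plane update, inverts the default helper flow. The same sequencing written as a generic commit tail, purely for illustration (omapdrm open-codes it above with its own completion helper instead of drm_atomic_helper_wait_for_vblanks()):

```c
static void example_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	/* enable the new mode first so the ovl setup sees it in the HW */
	drm_atomic_helper_commit_modeset_enables(dev, old_state);
	drm_atomic_helper_wait_for_vblanks(dev, old_state);

	drm_atomic_helper_commit_planes(dev, old_state, 0);
	drm_atomic_helper_wait_for_vblanks(dev, old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);
}
```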
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 06aaf79de8c8..89eb0422821c 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c | |||
| @@ -668,6 +668,48 @@ static const struct panel_desc avic_tm070ddh03 = { | |||
| 668 | }, | 668 | }, |
| 669 | }; | 669 | }; |
| 670 | 670 | ||
| 671 | static const struct drm_display_mode boe_nv101wxmn51_modes[] = { | ||
| 672 | { | ||
| 673 | .clock = 71900, | ||
| 674 | .hdisplay = 1280, | ||
| 675 | .hsync_start = 1280 + 48, | ||
| 676 | .hsync_end = 1280 + 48 + 32, | ||
| 677 | .htotal = 1280 + 48 + 32 + 80, | ||
| 678 | .vdisplay = 800, | ||
| 679 | .vsync_start = 800 + 3, | ||
| 680 | .vsync_end = 800 + 3 + 5, | ||
| 681 | .vtotal = 800 + 3 + 5 + 24, | ||
| 682 | .vrefresh = 60, | ||
| 683 | }, | ||
| 684 | { | ||
| 685 | .clock = 57500, | ||
| 686 | .hdisplay = 1280, | ||
| 687 | .hsync_start = 1280 + 48, | ||
| 688 | .hsync_end = 1280 + 48 + 32, | ||
| 689 | .htotal = 1280 + 48 + 32 + 80, | ||
| 690 | .vdisplay = 800, | ||
| 691 | .vsync_start = 800 + 3, | ||
| 692 | .vsync_end = 800 + 3 + 5, | ||
| 693 | .vtotal = 800 + 3 + 5 + 24, | ||
| 694 | .vrefresh = 48, | ||
| 695 | }, | ||
| 696 | }; | ||
| 697 | |||
| 698 | static const struct panel_desc boe_nv101wxmn51 = { | ||
| 699 | .modes = boe_nv101wxmn51_modes, | ||
| 700 | .num_modes = ARRAY_SIZE(boe_nv101wxmn51_modes), | ||
| 701 | .bpc = 8, | ||
| 702 | .size = { | ||
| 703 | .width = 217, | ||
| 704 | .height = 136, | ||
| 705 | }, | ||
| 706 | .delay = { | ||
| 707 | .prepare = 210, | ||
| 708 | .enable = 50, | ||
| 709 | .unprepare = 160, | ||
| 710 | }, | ||
| 711 | }; | ||
| 712 | |||
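As a sanity check on the two modes above, the refresh rate follows directly from clock / (htotal * vtotal):

```
htotal = 1280 + 48 + 32 + 80 = 1440
vtotal =  800 +  3 +  5 + 24 =  832

71900 kHz / (1440 * 832) ≈ 60.0 Hz   (matches .vrefresh = 60)
57500 kHz / (1440 * 832) ≈ 48.0 Hz   (matches .vrefresh = 48)
```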
| 671 | static const struct drm_display_mode chunghwa_claa070wp03xg_mode = { | 713 | static const struct drm_display_mode chunghwa_claa070wp03xg_mode = { |
| 672 | .clock = 66770, | 714 | .clock = 66770, |
| 673 | .hdisplay = 800, | 715 | .hdisplay = 800, |
| @@ -760,6 +802,8 @@ static const struct panel_desc edt_et057090dhu = { | |||
| 760 | .width = 115, | 802 | .width = 115, |
| 761 | .height = 86, | 803 | .height = 86, |
| 762 | }, | 804 | }, |
| 805 | .bus_format = MEDIA_BUS_FMT_RGB666_1X18, | ||
| 806 | .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE, | ||
| 763 | }; | 807 | }; |
| 764 | 808 | ||
| 765 | static const struct drm_display_mode edt_etm0700g0dh6_mode = { | 809 | static const struct drm_display_mode edt_etm0700g0dh6_mode = { |
| @@ -784,6 +828,8 @@ static const struct panel_desc edt_etm0700g0dh6 = { | |||
| 784 | .width = 152, | 828 | .width = 152, |
| 785 | .height = 91, | 829 | .height = 91, |
| 786 | }, | 830 | }, |
| 831 | .bus_format = MEDIA_BUS_FMT_RGB666_1X18, | ||
| 832 | .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE, | ||
| 787 | }; | 833 | }; |
| 788 | 834 | ||
| 789 | static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = { | 835 | static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = { |
| @@ -1277,6 +1323,29 @@ static const struct panel_desc nec_nl4827hc19_05b = { | |||
| 1277 | .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE, | 1323 | .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE, |
| 1278 | }; | 1324 | }; |
| 1279 | 1325 | ||
| 1326 | static const struct drm_display_mode netron_dy_e231732_mode = { | ||
| 1327 | .clock = 66000, | ||
| 1328 | .hdisplay = 1024, | ||
| 1329 | .hsync_start = 1024 + 160, | ||
| 1330 | .hsync_end = 1024 + 160 + 70, | ||
| 1331 | .htotal = 1024 + 160 + 70 + 90, | ||
| 1332 | .vdisplay = 600, | ||
| 1333 | .vsync_start = 600 + 127, | ||
| 1334 | .vsync_end = 600 + 127 + 20, | ||
| 1335 | .vtotal = 600 + 127 + 20 + 3, | ||
| 1336 | .vrefresh = 60, | ||
| 1337 | }; | ||
| 1338 | |||
| 1339 | static const struct panel_desc netron_dy_e231732 = { | ||
| 1340 | .modes = &netron_dy_e231732_mode, | ||
| 1341 | .num_modes = 1, | ||
| 1342 | .size = { | ||
| 1343 | .width = 154, | ||
| 1344 | .height = 87, | ||
| 1345 | }, | ||
| 1346 | .bus_format = MEDIA_BUS_FMT_RGB666_1X18, | ||
| 1347 | }; | ||
| 1348 | |||
| 1280 | static const struct drm_display_mode nvd_9128_mode = { | 1349 | static const struct drm_display_mode nvd_9128_mode = { |
| 1281 | .clock = 29500, | 1350 | .clock = 29500, |
| 1282 | .hdisplay = 800, | 1351 | .hdisplay = 800, |
| @@ -1632,6 +1701,30 @@ static const struct panel_desc starry_kr122ea0sra = { | |||
| 1632 | }, | 1701 | }, |
| 1633 | }; | 1702 | }; |
| 1634 | 1703 | ||
| 1704 | static const struct display_timing tianma_tm070jdhg30_timing = { | ||
| 1705 | .pixelclock = { 62600000, 68200000, 78100000 }, | ||
| 1706 | .hactive = { 1280, 1280, 1280 }, | ||
| 1707 | .hfront_porch = { 15, 64, 159 }, | ||
| 1708 | .hback_porch = { 5, 5, 5 }, | ||
| 1709 | .hsync_len = { 1, 1, 256 }, | ||
| 1710 | .vactive = { 800, 800, 800 }, | ||
| 1711 | .vfront_porch = { 3, 40, 99 }, | ||
| 1712 | .vback_porch = { 2, 2, 2 }, | ||
| 1713 | .vsync_len = { 1, 1, 128 }, | ||
| 1714 | .flags = DISPLAY_FLAGS_DE_HIGH, | ||
| 1715 | }; | ||
| 1716 | |||
| 1717 | static const struct panel_desc tianma_tm070jdhg30 = { | ||
| 1718 | .timings = &tianma_tm070jdhg30_timing, | ||
| 1719 | .num_timings = 1, | ||
| 1720 | .bpc = 8, | ||
| 1721 | .size = { | ||
| 1722 | .width = 151, | ||
| 1723 | .height = 95, | ||
| 1724 | }, | ||
| 1725 | .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, | ||
| 1726 | }; | ||
| 1727 | |||
| 1635 | static const struct drm_display_mode tpk_f07a_0102_mode = { | 1728 | static const struct drm_display_mode tpk_f07a_0102_mode = { |
| 1636 | .clock = 33260, | 1729 | .clock = 33260, |
| 1637 | .hdisplay = 800, | 1730 | .hdisplay = 800, |
| @@ -1748,6 +1841,9 @@ static const struct of_device_id platform_of_match[] = { | |||
| 1748 | .compatible = "avic,tm070ddh03", | 1841 | .compatible = "avic,tm070ddh03", |
| 1749 | .data = &avic_tm070ddh03, | 1842 | .data = &avic_tm070ddh03, |
| 1750 | }, { | 1843 | }, { |
| 1844 | .compatible = "boe,nv101wxmn51", | ||
| 1845 | .data = &boe_nv101wxmn51, | ||
| 1846 | }, { | ||
| 1751 | .compatible = "chunghwa,claa070wp03xg", | 1847 | .compatible = "chunghwa,claa070wp03xg", |
| 1752 | .data = &chunghwa_claa070wp03xg, | 1848 | .data = &chunghwa_claa070wp03xg, |
| 1753 | }, { | 1849 | }, { |
| @@ -1826,6 +1922,9 @@ static const struct of_device_id platform_of_match[] = { | |||
| 1826 | .compatible = "nec,nl4827hc19-05b", | 1922 | .compatible = "nec,nl4827hc19-05b", |
| 1827 | .data = &nec_nl4827hc19_05b, | 1923 | .data = &nec_nl4827hc19_05b, |
| 1828 | }, { | 1924 | }, { |
| 1925 | .compatible = "netron-dy,e231732", | ||
| 1926 | .data = &netron_dy_e231732, | ||
| 1927 | }, { | ||
| 1829 | .compatible = "nvd,9128", | 1928 | .compatible = "nvd,9128", |
| 1830 | .data = &nvd_9128, | 1929 | .data = &nvd_9128, |
| 1831 | }, { | 1930 | }, { |
| @@ -1868,6 +1967,9 @@ static const struct of_device_id platform_of_match[] = { | |||
| 1868 | .compatible = "starry,kr122ea0sra", | 1967 | .compatible = "starry,kr122ea0sra", |
| 1869 | .data = &starry_kr122ea0sra, | 1968 | .data = &starry_kr122ea0sra, |
| 1870 | }, { | 1969 | }, { |
| 1970 | .compatible = "tianma,tm070jdhg30", | ||
| 1971 | .data = &tianma_tm070jdhg30, | ||
| 1972 | }, { | ||
| 1871 | .compatible = "tpk,f07a-0102", | 1973 | .compatible = "tpk,f07a-0102", |
| 1872 | .data = &tpk_f07a_0102, | 1974 | .data = &tpk_f07a_0102, |
| 1873 | }, { | 1975 | }, { |
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 00cfb5d2875f..04c0ed41374f 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c | |||
| @@ -638,10 +638,8 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev) | |||
| 638 | vhdr->ImageLength, | 638 | vhdr->ImageLength, |
| 639 | GFP_KERNEL); | 639 | GFP_KERNEL); |
| 640 | 640 | ||
| 641 | if (!rdev->bios) { | 641 | if (!rdev->bios) |
| 642 | kfree(rdev->bios); | ||
| 643 | return false; | 642 | return false; |
| 644 | } | ||
| 645 | return true; | 643 | return true; |
| 646 | } | 644 | } |
| 647 | } | 645 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index af3bbe82fd48..956c425e639e 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
| @@ -97,9 +97,10 @@ | |||
| 97 | * 2.46.0 - Add PFP_SYNC_ME support on evergreen | 97 | * 2.46.0 - Add PFP_SYNC_ME support on evergreen |
| 98 | * 2.47.0 - Add UVD_NO_OP register support | 98 | * 2.47.0 - Add UVD_NO_OP register support |
| 99 | * 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI | 99 | * 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI |
| 100 | * 2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values | ||
| 100 | */ | 101 | */ |
| 101 | #define KMS_DRIVER_MAJOR 2 | 102 | #define KMS_DRIVER_MAJOR 2 |
| 102 | #define KMS_DRIVER_MINOR 48 | 103 | #define KMS_DRIVER_MINOR 49 |
| 103 | #define KMS_DRIVER_PATCHLEVEL 0 | 104 | #define KMS_DRIVER_PATCHLEVEL 0 |
| 104 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); | 105 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); |
| 105 | void radeon_driver_unload_kms(struct drm_device *dev); | 106 | void radeon_driver_unload_kms(struct drm_device *dev); |
| @@ -366,11 +367,10 @@ static void | |||
| 366 | radeon_pci_shutdown(struct pci_dev *pdev) | 367 | radeon_pci_shutdown(struct pci_dev *pdev) |
| 367 | { | 368 | { |
| 368 | /* if we are running in a VM, make sure the device | 369 | /* if we are running in a VM, make sure the device |
| 369 | * torn down properly on reboot/shutdown. | 370 | * is torn down properly on reboot/shutdown |
| 370 | * unfortunately we can't detect certain | ||
| 371 | * hypervisors so just do this all the time. | ||
| 372 | */ | 371 | */ |
| 373 | radeon_pci_remove(pdev); | 372 | if (radeon_device_is_virtual()) |
| 373 | radeon_pci_remove(pdev); | ||
| 374 | } | 374 | } |
| 375 | 375 | ||
| 376 | static int radeon_pmops_suspend(struct device *dev) | 376 | static int radeon_pmops_suspend(struct device *dev) |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 0bcffd8a7bd3..96683f5b2b1b 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
| @@ -220,8 +220,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, | |||
| 220 | 220 | ||
| 221 | man = &rdev->mman.bdev.man[TTM_PL_VRAM]; | 221 | man = &rdev->mman.bdev.man[TTM_PL_VRAM]; |
| 222 | 222 | ||
| 223 | args->vram_size = rdev->mc.real_vram_size; | 223 | args->vram_size = (u64)man->size << PAGE_SHIFT; |
| 224 | args->vram_visible = (u64)man->size << PAGE_SHIFT; | 224 | args->vram_visible = rdev->mc.visible_vram_size; |
| 225 | args->vram_visible -= rdev->vram_pin_size; | 225 | args->vram_visible -= rdev->vram_pin_size; |
| 226 | args->gart_size = rdev->mc.gtt_size; | 226 | args->gart_size = rdev->mc.gtt_size; |
| 227 | args->gart_size -= rdev->gart_pin_size; | 227 | args->gart_size -= rdev->gart_pin_size; |
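After this change vram_size reports the full TTM-managed VRAM size and vram_visible the CPU-visible aperture minus pinned buffers, matching the 2.49.0 interface bump above. A sketch of how userspace reads these values through libdrm (illustrative; error handling trimmed):

```c
#include <stdio.h>
#include <xf86drm.h>
#include <radeon_drm.h>

struct drm_radeon_gem_info info = {0};

/* fd is an open render or card node for a radeon device */
if (drmCommandWriteRead(fd, DRM_RADEON_GEM_INFO, &info, sizeof(info)) == 0)
	printf("vram: %llu MiB total, %llu MiB CPU-visible, gart: %llu MiB\n",
	       (unsigned long long)(info.vram_size >> 20),
	       (unsigned long long)(info.vram_visible >> 20),
	       (unsigned long long)(info.gart_size >> 20));
```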
diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c index a01efe39a820..f541a4b5ac51 100644 --- a/drivers/gpu/drm/radeon/vce_v1_0.c +++ b/drivers/gpu/drm/radeon/vce_v1_0.c | |||
| @@ -196,7 +196,7 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data) | |||
| 196 | memset(&data[5], 0, 44); | 196 | memset(&data[5], 0, 44); |
| 197 | memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign)); | 197 | memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign)); |
| 198 | 198 | ||
| 199 | data += le32_to_cpu(data[4]) / 4; | 199 | data += (le32_to_cpu(sign->len) + 64) / 4; |
| 200 | data[0] = sign->val[i].sigval[0]; | 200 | data[0] = sign->val[i].sigval[0]; |
| 201 | data[1] = sign->val[i].sigval[1]; | 201 | data[1] = sign->val[i].sigval[1]; |
| 202 | data[2] = sign->val[i].sigval[2]; | 202 | data[2] = sign->val[i].sigval[2]; |
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index 6f7f9c59f05b..ad31b3eb408f 100644 --- a/drivers/gpu/drm/rockchip/Kconfig +++ b/drivers/gpu/drm/rockchip/Kconfig | |||
| @@ -21,6 +21,16 @@ config ROCKCHIP_ANALOGIX_DP | |||
| 21 | for the Analogix Core DP driver. If you want to enable DP | 21 | for the Analogix Core DP driver. If you want to enable DP |
| 22 | on RK3288 based SoC, you should selet this option. | 22 | on RK3288 based SoC, you should selet this option. |
| 23 | 23 | ||
| 24 | config ROCKCHIP_CDN_DP | ||
| 25 | tristate "Rockchip cdn DP" | ||
| 26 | depends on DRM_ROCKCHIP | ||
| 27 | select SND_SOC_HDMI_CODEC if SND_SOC | ||
| 28 | help | ||
| 29 | This selects support for Rockchip SoC specific extensions | ||
| 30 | for the cdn DP driver. If you want to enable DP on | ||
| 31 | RK3399 based SoC, you should select this option. | ||
| 32 | |||
| 33 | |||
| 24 | config ROCKCHIP_DW_HDMI | 34 | config ROCKCHIP_DW_HDMI |
| 25 | tristate "Rockchip specific extensions for Synopsys DW HDMI" | 35 | tristate "Rockchip specific extensions for Synopsys DW HDMI" |
| 26 | depends on DRM_ROCKCHIP | 36 | depends on DRM_ROCKCHIP |
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile index 9746365694ba..c931e2a7d8de 100644 --- a/drivers/gpu/drm/rockchip/Makefile +++ b/drivers/gpu/drm/rockchip/Makefile | |||
| @@ -7,6 +7,8 @@ rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \ | |||
| 7 | rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o | 7 | rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o |
| 8 | 8 | ||
| 9 | obj-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o | 9 | obj-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o |
| 10 | obj-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp.o | ||
| 11 | cdn-dp-objs := cdn-dp-core.o cdn-dp-reg.o | ||
| 10 | obj-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o | 12 | obj-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o |
| 11 | obj-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o | 13 | obj-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o |
| 12 | obj-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o | 14 | obj-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o |
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c new file mode 100644 index 000000000000..9ab67a670885 --- /dev/null +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c | |||
| @@ -0,0 +1,1260 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd | ||
| 3 | * Author: Chris Zhong <zyw@rock-chips.com> | ||
| 4 | * | ||
| 5 | * This software is licensed under the terms of the GNU General Public | ||
| 6 | * License version 2, as published by the Free Software Foundation, and | ||
| 7 | * may be copied, distributed, and modified under those terms. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | */ | ||
| 14 | |||
| 15 | #include <drm/drmP.h> | ||
| 16 | #include <drm/drm_atomic_helper.h> | ||
| 17 | #include <drm/drm_crtc_helper.h> | ||
| 18 | #include <drm/drm_dp_helper.h> | ||
| 19 | #include <drm/drm_edid.h> | ||
| 20 | #include <drm/drm_of.h> | ||
| 21 | |||
| 22 | #include <linux/clk.h> | ||
| 23 | #include <linux/component.h> | ||
| 24 | #include <linux/extcon.h> | ||
| 25 | #include <linux/firmware.h> | ||
| 26 | #include <linux/regmap.h> | ||
| 27 | #include <linux/reset.h> | ||
| 28 | #include <linux/mfd/syscon.h> | ||
| 29 | #include <linux/phy/phy.h> | ||
| 30 | |||
| 31 | #include <sound/hdmi-codec.h> | ||
| 32 | |||
| 33 | #include "cdn-dp-core.h" | ||
| 34 | #include "cdn-dp-reg.h" | ||
| 35 | #include "rockchip_drm_vop.h" | ||
| 36 | |||
| 37 | #define connector_to_dp(c) \ | ||
| 38 | container_of(c, struct cdn_dp_device, connector) | ||
| 39 | |||
| 40 | #define encoder_to_dp(c) \ | ||
| 41 | container_of(c, struct cdn_dp_device, encoder) | ||
| 42 | |||
| 43 | #define GRF_SOC_CON9 0x6224 | ||
| 44 | #define DP_SEL_VOP_LIT BIT(12) | ||
| 45 | #define GRF_SOC_CON26 0x6268 | ||
| 46 | #define UPHY_SEL_BIT 3 | ||
| 47 | #define UPHY_SEL_MASK BIT(19) | ||
| 48 | #define DPTX_HPD_SEL (3 << 12) | ||
| 49 | #define DPTX_HPD_DEL (2 << 12) | ||
| 50 | #define DPTX_HPD_SEL_MASK (3 << 28) | ||
| 51 | |||
| 52 | #define CDN_FW_TIMEOUT_MS (64 * 1000) | ||
| 53 | #define CDN_DPCD_TIMEOUT_MS 5000 | ||
| 54 | #define CDN_DP_FIRMWARE "rockchip/dptx.bin" | ||
| 55 | |||
| 56 | struct cdn_dp_data { | ||
| 57 | u8 max_phy; | ||
| 58 | }; | ||
| 59 | |||
| 60 | static const struct cdn_dp_data rk3399_cdn_dp = { | ||
| 61 | .max_phy = 2, | ||
| 62 | }; | ||
| 63 | |||
| 64 | static const struct of_device_id cdn_dp_dt_ids[] = { | ||
| 65 | { .compatible = "rockchip,rk3399-cdn-dp", | ||
| 66 | .data = (void *)&rk3399_cdn_dp }, | ||
| 67 | {} | ||
| 68 | }; | ||
| 69 | |||
| 70 | MODULE_DEVICE_TABLE(of, cdn_dp_dt_ids); | ||
| 71 | |||
| 72 | static int cdn_dp_grf_write(struct cdn_dp_device *dp, | ||
| 73 | unsigned int reg, unsigned int val) | ||
| 74 | { | ||
| 75 | int ret; | ||
| 76 | |||
| 77 | ret = clk_prepare_enable(dp->grf_clk); | ||
| 78 | if (ret) { | ||
| 79 | DRM_DEV_ERROR(dp->dev, "Failed to prepare_enable grf clock\n"); | ||
| 80 | return ret; | ||
| 81 | } | ||
| 82 | |||
| 83 | ret = regmap_write(dp->grf, reg, val); | ||
| 84 | if (ret) { | ||
| 85 | DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret); | ||
| 86 | clk_disable_unprepare(dp->grf_clk); | ||
| 86 | return ret; | ||
| 87 | } | ||
| 88 | |||
| 89 | clk_disable_unprepare(dp->grf_clk); | ||
| 90 | |||
| 91 | return 0; | ||
| 92 | } | ||
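Rockchip GRF registers take a write-enable mask in their upper 16 bits: only bits whose mask bit is set are modified by a write. That is why every value passed to cdn_dp_grf_write() pairs the payload bits with a shifted mask, as in the VOP selection done later in this file:

```c
/* set bit 12 (route the "little" VOP to DP): value and mask both set */
cdn_dp_grf_write(dp, GRF_SOC_CON9, DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16));

/* clear bit 12 (route the "big" VOP): mask set, value bit left at 0 */
cdn_dp_grf_write(dp, GRF_SOC_CON9, DP_SEL_VOP_LIT << 16);
```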
| 93 | |||
| 94 | static int cdn_dp_clk_enable(struct cdn_dp_device *dp) | ||
| 95 | { | ||
| 96 | int ret; | ||
| 97 | u32 rate; | ||
| 98 | |||
| 99 | ret = clk_prepare_enable(dp->pclk); | ||
| 100 | if (ret < 0) { | ||
| 101 | DRM_DEV_ERROR(dp->dev, "cannot enable dp pclk %d\n", ret); | ||
| 102 | goto err_pclk; | ||
| 103 | } | ||
| 104 | |||
| 105 | ret = clk_prepare_enable(dp->core_clk); | ||
| 106 | if (ret < 0) { | ||
| 107 | DRM_DEV_ERROR(dp->dev, "cannot enable core_clk %d\n", ret); | ||
| 108 | goto err_core_clk; | ||
| 109 | } | ||
| 110 | |||
| 111 | ret = pm_runtime_get_sync(dp->dev); | ||
| 112 | if (ret < 0) { | ||
| 113 | DRM_DEV_ERROR(dp->dev, "cannot get pm runtime %d\n", ret); | ||
| 114 | goto err_set_rate; /* unwind both clocks */ | ||
| 115 | } | ||
| 116 | |||
| 117 | reset_control_assert(dp->core_rst); | ||
| 118 | reset_control_assert(dp->dptx_rst); | ||
| 119 | reset_control_assert(dp->apb_rst); | ||
| 120 | reset_control_deassert(dp->core_rst); | ||
| 121 | reset_control_deassert(dp->dptx_rst); | ||
| 122 | reset_control_deassert(dp->apb_rst); | ||
| 123 | |||
| 124 | rate = clk_get_rate(dp->core_clk); | ||
| 125 | if (!rate) { | ||
| 126 | DRM_DEV_ERROR(dp->dev, "get clk rate failed\n"); | ||
| 127 | ret = -EINVAL; | ||
| 127 | goto err_set_rate; | ||
| 128 | } | ||
| 129 | |||
| 130 | cdn_dp_set_fw_clk(dp, rate); | ||
| 131 | cdn_dp_clock_reset(dp); | ||
| 132 | |||
| 133 | return 0; | ||
| 134 | |||
| 135 | err_set_rate: | ||
| 136 | clk_disable_unprepare(dp->core_clk); | ||
| 137 | err_core_clk: | ||
| 138 | clk_disable_unprepare(dp->pclk); | ||
| 139 | err_pclk: | ||
| 140 | return ret; | ||
| 141 | } | ||
| 142 | |||
| 143 | static void cdn_dp_clk_disable(struct cdn_dp_device *dp) | ||
| 144 | { | ||
| 145 | pm_runtime_put_sync(dp->dev); | ||
| 146 | clk_disable_unprepare(dp->pclk); | ||
| 147 | clk_disable_unprepare(dp->core_clk); | ||
| 148 | } | ||
| 149 | |||
| 150 | static int cdn_dp_get_port_lanes(struct cdn_dp_port *port) | ||
| 151 | { | ||
| 152 | struct extcon_dev *edev = port->extcon; | ||
| 153 | union extcon_property_value property; | ||
| 154 | int dptx; | ||
| 155 | u8 lanes; | ||
| 156 | |||
| 157 | dptx = extcon_get_state(edev, EXTCON_DISP_DP); | ||
| 158 | if (dptx > 0) { | ||
| 159 | extcon_get_property(edev, EXTCON_DISP_DP, | ||
| 160 | EXTCON_PROP_USB_SS, &property); | ||
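| | /* Type-C pin assignments D/F keep USB3 (2 lanes for DP); C/E use all 4 */ | ||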
| 161 | if (property.intval) | ||
| 162 | lanes = 2; | ||
| 163 | else | ||
| 164 | lanes = 4; | ||
| 165 | } else { | ||
| 166 | lanes = 0; | ||
| 167 | } | ||
| 168 | |||
| 169 | return lanes; | ||
| 170 | } | ||
| 171 | |||
| 172 | static int cdn_dp_get_sink_count(struct cdn_dp_device *dp, u8 *sink_count) | ||
| 173 | { | ||
| 174 | int ret; | ||
| 175 | u8 value; | ||
| 176 | |||
| 177 | *sink_count = 0; | ||
| 178 | ret = cdn_dp_dpcd_read(dp, DP_SINK_COUNT, &value, 1); | ||
| 179 | if (ret) | ||
| 180 | return ret; | ||
| 181 | |||
| 182 | *sink_count = DP_GET_SINK_COUNT(value); | ||
| 183 | return 0; | ||
| 184 | } | ||
| 185 | |||
| 186 | static struct cdn_dp_port *cdn_dp_connected_port(struct cdn_dp_device *dp) | ||
| 187 | { | ||
| 188 | struct cdn_dp_port *port; | ||
| 189 | int i, lanes; | ||
| 190 | |||
| 191 | for (i = 0; i < dp->ports; i++) { | ||
| 192 | port = dp->port[i]; | ||
| 193 | lanes = cdn_dp_get_port_lanes(port); | ||
| 194 | if (lanes) | ||
| 195 | return port; | ||
| 196 | } | ||
| 197 | return NULL; | ||
| 198 | } | ||
| 199 | |||
| 200 | static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp) | ||
| 201 | { | ||
| 202 | unsigned long timeout = jiffies + msecs_to_jiffies(CDN_DPCD_TIMEOUT_MS); | ||
| 203 | struct cdn_dp_port *port; | ||
| 204 | u8 sink_count = 0; | ||
| 205 | |||
| 206 | if (dp->active_port < 0 || dp->active_port >= dp->ports) { | ||
| 207 | DRM_DEV_ERROR(dp->dev, "active_port is wrong!\n"); | ||
| 208 | return false; | ||
| 209 | } | ||
| 210 | |||
| 211 | port = dp->port[dp->active_port]; | ||
| 212 | |||
| 213 | /* | ||
| 214 | * Attempt to read sink count, retry in case the sink may not be ready. | ||
| 215 | * | ||
| 216 | * Sinks are *supposed* to come up within 1ms from an off state, but | ||
| 217 | * some docks need more time to power up. | ||
| 218 | */ | ||
| 219 | while (time_before(jiffies, timeout)) { | ||
| 220 | if (!extcon_get_state(port->extcon, EXTCON_DISP_DP)) | ||
| 221 | return false; | ||
| 222 | |||
| 223 | if (!cdn_dp_get_sink_count(dp, &sink_count)) | ||
| 224 | return sink_count ? true : false; | ||
| 225 | |||
| 226 | usleep_range(5000, 10000); | ||
| 227 | } | ||
| 228 | |||
| 229 | DRM_DEV_ERROR(dp->dev, "Get sink capability timed out\n"); | ||
| 230 | return false; | ||
| 231 | } | ||
| 232 | |||
| 233 | static enum drm_connector_status | ||
| 234 | cdn_dp_connector_detect(struct drm_connector *connector, bool force) | ||
| 235 | { | ||
| 236 | struct cdn_dp_device *dp = connector_to_dp(connector); | ||
| 237 | enum drm_connector_status status = connector_status_disconnected; | ||
| 238 | |||
| 239 | mutex_lock(&dp->lock); | ||
| 240 | if (dp->connected) | ||
| 241 | status = connector_status_connected; | ||
| 242 | mutex_unlock(&dp->lock); | ||
| 243 | |||
| 244 | return status; | ||
| 245 | } | ||
| 246 | |||
| 247 | static void cdn_dp_connector_destroy(struct drm_connector *connector) | ||
| 248 | { | ||
| 249 | drm_connector_unregister(connector); | ||
| 250 | drm_connector_cleanup(connector); | ||
| 251 | } | ||
| 252 | |||
| 253 | static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = { | ||
| 254 | .dpms = drm_atomic_helper_connector_dpms, | ||
| 255 | .detect = cdn_dp_connector_detect, | ||
| 256 | .destroy = cdn_dp_connector_destroy, | ||
| 257 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
| 258 | .reset = drm_atomic_helper_connector_reset, | ||
| 259 | .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, | ||
| 260 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, | ||
| 261 | }; | ||
| 262 | |||
| 263 | static int cdn_dp_connector_get_modes(struct drm_connector *connector) | ||
| 264 | { | ||
| 265 | struct cdn_dp_device *dp = connector_to_dp(connector); | ||
| 266 | struct edid *edid; | ||
| 267 | int ret = 0; | ||
| 268 | |||
| 269 | mutex_lock(&dp->lock); | ||
| 270 | edid = dp->edid; | ||
| 271 | if (edid) { | ||
| 272 | DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n", | ||
| 273 | edid->width_cm, edid->height_cm); | ||
| 274 | |||
| 275 | dp->sink_has_audio = drm_detect_monitor_audio(edid); | ||
| 276 | ret = drm_add_edid_modes(connector, edid); | ||
| 277 | if (ret) { | ||
| 278 | drm_mode_connector_update_edid_property(connector, | ||
| 279 | edid); | ||
| 280 | drm_edid_to_eld(connector, edid); | ||
| 281 | } | ||
| 282 | } | ||
| 283 | mutex_unlock(&dp->lock); | ||
| 284 | |||
| 285 | return ret; | ||
| 286 | } | ||
| 287 | |||
| 288 | static struct drm_encoder * | ||
| 289 | cdn_dp_connector_best_encoder(struct drm_connector *connector) | ||
| 290 | { | ||
| 291 | struct cdn_dp_device *dp = connector_to_dp(connector); | ||
| 292 | |||
| 293 | return &dp->encoder; | ||
| 294 | } | ||
| 295 | |||
| 296 | static int cdn_dp_connector_mode_valid(struct drm_connector *connector, | ||
| 297 | struct drm_display_mode *mode) | ||
| 298 | { | ||
| 299 | struct cdn_dp_device *dp = connector_to_dp(connector); | ||
| 300 | struct drm_display_info *display_info = &dp->connector.display_info; | ||
| 301 | u32 requested, actual, rate, sink_max, source_max = 0; | ||
| 302 | u8 lanes, bpc; | ||
| 303 | |||
| 304 | /* If DP is disconnected, every mode is invalid */ | ||
| 305 | if (!dp->connected) | ||
| 306 | return MODE_BAD; | ||
| 307 | |||
| 308 | switch (display_info->bpc) { | ||
| 309 | case 10: | ||
| 310 | bpc = 10; | ||
| 311 | break; | ||
| 312 | case 6: | ||
| 313 | bpc = 6; | ||
| 314 | break; | ||
| 315 | default: | ||
| 316 | bpc = 8; | ||
| 317 | break; | ||
| 318 | } | ||
| 319 | |||
| 320 | requested = mode->clock * bpc * 3 / 1000; | ||
| 321 | |||
| 322 | source_max = dp->lanes; | ||
| 323 | sink_max = drm_dp_max_lane_count(dp->dpcd); | ||
| 324 | lanes = min(source_max, sink_max); | ||
| 325 | |||
| 326 | source_max = drm_dp_bw_code_to_link_rate(CDN_DP_MAX_LINK_RATE); | ||
| 327 | sink_max = drm_dp_max_link_rate(dp->dpcd); | ||
| 328 | rate = min(source_max, sink_max); | ||
| 329 | |||
| 330 | actual = rate * lanes / 100; | ||
| 331 | |||
| 332 | /* 8b/10b channel coding means link efficiency is about 0.8 */ | ||
| 333 | actual = actual * 8 / 10; | ||
| 334 | |||
| 335 | if (requested > actual) { | ||
| 336 | DRM_DEV_DEBUG_KMS(dp->dev, | ||
| 337 | "requested=%d, actual=%d, clock=%d\n", | ||
| 338 | requested, actual, mode->clock); | ||
| 339 | return MODE_CLOCK_HIGH; | ||
| 340 | } | ||
| 341 | |||
| 342 | return MODE_OK; | ||
| 343 | } | ||
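To see the check above in action, take (illustratively) a 1920x1080@60 mode with a 148500 kHz clock at 8 bpc, driven over two lanes at the 5.4 Gb/s link rate:

```
requested = 148500 * 8 * 3 / 1000 = 3564   (Mbit/s of pixel data)
actual    = 540000 * 2 / 100      = 10800  (Mbit/s of raw symbols)
actual    = 10800 * 8 / 10        = 8640   (after the ~0.8 efficiency factor)
```

3564 <= 8640, so the mode passes; a 4K@60 mode (~533250 kHz clock) would request ~12798 Mbit/s and be rejected with MODE_CLOCK_HIGH on the same two lanes.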
| 344 | |||
| 345 | static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = { | ||
| 346 | .get_modes = cdn_dp_connector_get_modes, | ||
| 347 | .best_encoder = cdn_dp_connector_best_encoder, | ||
| 348 | .mode_valid = cdn_dp_connector_mode_valid, | ||
| 349 | }; | ||
| 350 | |||
| 351 | static int cdn_dp_firmware_init(struct cdn_dp_device *dp) | ||
| 352 | { | ||
| 353 | int ret; | ||
| 354 | const u32 *iram_data, *dram_data; | ||
| 355 | const struct firmware *fw = dp->fw; | ||
| 356 | const struct cdn_firmware_header *hdr; | ||
| 357 | |||
| 358 | hdr = (struct cdn_firmware_header *)fw->data; | ||
| 359 | if (fw->size != le32_to_cpu(hdr->size_bytes)) { | ||
| 360 | DRM_DEV_ERROR(dp->dev, "firmware is invalid\n"); | ||
| 361 | return -EINVAL; | ||
| 362 | } | ||
| 363 | |||
| 364 | iram_data = (const u32 *)(fw->data + hdr->header_size); | ||
| 365 | dram_data = (const u32 *)(fw->data + hdr->header_size + hdr->iram_size); | ||
| 366 | |||
| 367 | ret = cdn_dp_load_firmware(dp, iram_data, hdr->iram_size, | ||
| 368 | dram_data, hdr->dram_size); | ||
| 369 | if (ret) | ||
| 370 | return ret; | ||
| 371 | |||
| 372 | ret = cdn_dp_set_firmware_active(dp, true); | ||
| 373 | if (ret) { | ||
| 374 | DRM_DEV_ERROR(dp->dev, "active ucpu failed: %d\n", ret); | ||
| 375 | return ret; | ||
| 376 | } | ||
| 377 | |||
| 378 | return cdn_dp_event_config(dp); | ||
| 379 | } | ||
| 380 | |||
| 381 | static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp) | ||
| 382 | { | ||
| 383 | int ret; | ||
| 384 | |||
| 385 | if (!cdn_dp_check_sink_connection(dp)) | ||
| 386 | return -ENODEV; | ||
| 387 | |||
| 388 | ret = cdn_dp_dpcd_read(dp, DP_DPCD_REV, dp->dpcd, | ||
| 389 | DP_RECEIVER_CAP_SIZE); | ||
| 390 | if (ret) { | ||
| 391 | DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret); | ||
| 392 | return ret; | ||
| 393 | } | ||
| 394 | |||
| 395 | kfree(dp->edid); | ||
| 396 | dp->edid = drm_do_get_edid(&dp->connector, | ||
| 397 | cdn_dp_get_edid_block, dp); | ||
| 398 | return 0; | ||
| 399 | } | ||
| 400 | |||
| 401 | static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port) | ||
| 402 | { | ||
| 403 | union extcon_property_value property; | ||
| 404 | int ret; | ||
| 405 | |||
| 406 | ret = cdn_dp_grf_write(dp, GRF_SOC_CON26, | ||
| 407 | (port->id << UPHY_SEL_BIT) | UPHY_SEL_MASK); | ||
| 408 | if (ret) | ||
| 409 | return ret; | ||
| 410 | |||
| 411 | if (!port->phy_enabled) { | ||
| 412 | ret = phy_power_on(port->phy); | ||
| 413 | if (ret) { | ||
| 414 | DRM_DEV_ERROR(dp->dev, "phy power on failed: %d\n", | ||
| 415 | ret); | ||
| 416 | goto err_phy; | ||
| 417 | } | ||
| 418 | port->phy_enabled = true; | ||
| 419 | } | ||
| 420 | |||
| 421 | ret = cdn_dp_grf_write(dp, GRF_SOC_CON26, | ||
| 422 | DPTX_HPD_SEL_MASK | DPTX_HPD_SEL); | ||
| 423 | if (ret) { | ||
| 424 | DRM_DEV_ERROR(dp->dev, "Failed to write HPD_SEL %d\n", ret); | ||
| 425 | goto err_power_on; | ||
| 426 | } | ||
| 427 | |||
| 428 | ret = cdn_dp_get_hpd_status(dp); | ||
| 429 | if (ret <= 0) { | ||
| 430 | if (!ret) | ||
| 431 | DRM_DEV_ERROR(dp->dev, "hpd does not exist\n"); | ||
| 432 | goto err_power_on; | ||
| 433 | } | ||
| 434 | |||
| 435 | ret = extcon_get_property(port->extcon, EXTCON_DISP_DP, | ||
| 436 | EXTCON_PROP_USB_TYPEC_POLARITY, &property); | ||
| 437 | if (ret) { | ||
| 438 | DRM_DEV_ERROR(dp->dev, "get property failed\n"); | ||
| 439 | goto err_power_on; | ||
| 440 | } | ||
| 441 | |||
| 442 | port->lanes = cdn_dp_get_port_lanes(port); | ||
| 443 | ret = cdn_dp_set_host_cap(dp, port->lanes, property.intval); | ||
| 444 | if (ret) { | ||
| 445 | DRM_DEV_ERROR(dp->dev, "set host capabilities failed: %d\n", | ||
| 446 | ret); | ||
| 447 | goto err_power_on; | ||
| 448 | } | ||
| 449 | |||
| 450 | dp->active_port = port->id; | ||
| 451 | return 0; | ||
| 452 | |||
| 453 | err_power_on: | ||
| 454 | if (phy_power_off(port->phy)) | ||
| 455 | DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret); | ||
| 456 | else | ||
| 457 | port->phy_enabled = false; | ||
| 458 | |||
| 459 | err_phy: | ||
| 460 | cdn_dp_grf_write(dp, GRF_SOC_CON26, | ||
| 461 | DPTX_HPD_SEL_MASK | DPTX_HPD_DEL); | ||
| 462 | return ret; | ||
| 463 | } | ||
| 464 | |||
| 465 | static int cdn_dp_disable_phy(struct cdn_dp_device *dp, | ||
| 466 | struct cdn_dp_port *port) | ||
| 467 | { | ||
| 468 | int ret; | ||
| 469 | |||
| 470 | if (port->phy_enabled) { | ||
| 471 | ret = phy_power_off(port->phy); | ||
| 472 | if (ret) { | ||
| 473 | DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret); | ||
| 474 | return ret; | ||
| 475 | } | ||
| 476 | } | ||
| 477 | |||
| 478 | port->phy_enabled = false; | ||
| 479 | port->lanes = 0; | ||
| 480 | dp->active_port = -1; | ||
| 481 | return 0; | ||
| 482 | } | ||
| 483 | |||
| 484 | static int cdn_dp_disable(struct cdn_dp_device *dp) | ||
| 485 | { | ||
| 486 | int ret, i; | ||
| 487 | |||
| 488 | if (!dp->active) | ||
| 489 | return 0; | ||
| 490 | |||
| 491 | for (i = 0; i < dp->ports; i++) | ||
| 492 | cdn_dp_disable_phy(dp, dp->port[i]); | ||
| 493 | |||
| 494 | ret = cdn_dp_grf_write(dp, GRF_SOC_CON26, | ||
| 495 | DPTX_HPD_SEL_MASK | DPTX_HPD_DEL); | ||
| 496 | if (ret) { | ||
| 497 | DRM_DEV_ERROR(dp->dev, "Failed to clear hpd sel %d\n", | ||
| 498 | ret); | ||
| 499 | return ret; | ||
| 500 | } | ||
| 501 | |||
| 502 | cdn_dp_set_firmware_active(dp, false); | ||
| 503 | cdn_dp_clk_disable(dp); | ||
| 504 | dp->active = false; | ||
| 505 | dp->link.rate = 0; | ||
| 506 | dp->link.num_lanes = 0; | ||
| 507 | if (!dp->connected) { | ||
| 508 | kfree(dp->edid); | ||
| 509 | dp->edid = NULL; | ||
| 510 | } | ||
| 511 | |||
| 512 | return 0; | ||
| 513 | } | ||
| 514 | |||
| 515 | static int cdn_dp_enable(struct cdn_dp_device *dp) | ||
| 516 | { | ||
| 517 | int ret, i, lanes; | ||
| 518 | struct cdn_dp_port *port; | ||
| 519 | |||
| 520 | port = cdn_dp_connected_port(dp); | ||
| 521 | if (!port) { | ||
| 522 | DRM_DEV_ERROR(dp->dev, | ||
| 523 | "Can't enable without connection\n"); | ||
| 524 | return -ENODEV; | ||
| 525 | } | ||
| 526 | |||
| 527 | if (dp->active) | ||
| 528 | return 0; | ||
| 529 | |||
| 530 | ret = cdn_dp_clk_enable(dp); | ||
| 531 | if (ret) | ||
| 532 | return ret; | ||
| 533 | |||
| 534 | ret = cdn_dp_firmware_init(dp); | ||
| 535 | if (ret) { | ||
| 536 | DRM_DEV_ERROR(dp->dev, "firmware init failed: %d", ret); | ||
| 537 | goto err_clk_disable; | ||
| 538 | } | ||
| 539 | |||
| 540 | /* only enable the port that is connected to a downstream device */ | ||
| 541 | for (i = port->id; i < dp->ports; i++) { | ||
| 542 | port = dp->port[i]; | ||
| 543 | lanes = cdn_dp_get_port_lanes(port); | ||
| 544 | if (lanes) { | ||
| 545 | ret = cdn_dp_enable_phy(dp, port); | ||
| 546 | if (ret) | ||
| 547 | continue; | ||
| 548 | |||
| 549 | ret = cdn_dp_get_sink_capability(dp); | ||
| 550 | if (ret) { | ||
| 551 | cdn_dp_disable_phy(dp, port); | ||
| 552 | } else { | ||
| 553 | dp->active = true; | ||
| 554 | dp->lanes = port->lanes; | ||
| 555 | return 0; | ||
| 556 | } | ||
| 557 | } | ||
| 558 | } | ||
| 559 | |||
| 560 | err_clk_disable: | ||
| 561 | cdn_dp_clk_disable(dp); | ||
| 562 | return ret; | ||
| 563 | } | ||
| 564 | |||
| 565 | static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder, | ||
| 566 | struct drm_display_mode *mode, | ||
| 567 | struct drm_display_mode *adjusted) | ||
| 568 | { | ||
| 569 | struct cdn_dp_device *dp = encoder_to_dp(encoder); | ||
| 570 | struct drm_display_info *display_info = &dp->connector.display_info; | ||
| 571 | struct video_info *video = &dp->video_info; | ||
| 572 | |||
| 573 | switch (display_info->bpc) { | ||
| 574 | case 10: | ||
| 575 | video->color_depth = 10; | ||
| 576 | break; | ||
| 577 | case 6: | ||
| 578 | video->color_depth = 6; | ||
| 579 | break; | ||
| 580 | default: | ||
| 581 | video->color_depth = 8; | ||
| 582 | break; | ||
| 583 | } | ||
| 584 | |||
| 585 | video->color_fmt = PXL_RGB; | ||
| 586 | video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC); | ||
| 587 | video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC); | ||
| 588 | |||
| 589 | memcpy(&dp->mode, adjusted, sizeof(*mode)); | ||
| 590 | } | ||
| 591 | |||
| 592 | static bool cdn_dp_check_link_status(struct cdn_dp_device *dp) | ||
| 593 | { | ||
| 594 | u8 link_status[DP_LINK_STATUS_SIZE]; | ||
| 595 | struct cdn_dp_port *port = cdn_dp_connected_port(dp); | ||
| 596 | u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd); | ||
| 597 | |||
| 598 | if (!port || !dp->link.rate || !dp->link.num_lanes) | ||
| 599 | return false; | ||
| 600 | |||
| 601 | if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status, | ||
| 602 | DP_LINK_STATUS_SIZE)) { | ||
| 603 | DRM_ERROR("Failed to get link status\n"); | ||
| 604 | return false; | ||
| 605 | } | ||
| 606 | |||
| 607 | /* if link training is requested, we should always perform it */ | ||
| 608 | return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes)); | ||
| 609 | } | ||
| 610 | |||
| 611 | static void cdn_dp_encoder_enable(struct drm_encoder *encoder) | ||
| 612 | { | ||
| 613 | struct cdn_dp_device *dp = encoder_to_dp(encoder); | ||
| 614 | int ret, val; | ||
| 615 | struct rockchip_crtc_state *state; | ||
| 616 | |||
| 617 | ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder); | ||
| 618 | if (ret < 0) { | ||
| 619 | DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret); | ||
| 620 | return; | ||
| 621 | } | ||
| 622 | |||
| 623 | DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n", | ||
| 624 | (ret) ? "LIT" : "BIG"); | ||
| 625 | state = to_rockchip_crtc_state(encoder->crtc->state); | ||
| 626 | if (ret) { | ||
| 627 | val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16); | ||
| 628 | state->output_mode = ROCKCHIP_OUT_MODE_P888; | ||
| 629 | } else { | ||
| 630 | val = DP_SEL_VOP_LIT << 16; | ||
| 631 | state->output_mode = ROCKCHIP_OUT_MODE_AAAA; | ||
| 632 | } | ||
| 633 | |||
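| | /* | ||
| | * Rockchip GRF registers take a write-enable mask in the upper 16 | ||
| | * bits: writing (bit | (bit << 16)) sets a bit, while writing only | ||
| | * (bit << 16) clears it. The else branch above therefore selects | ||
| | * the BIG vop by clearing DP_SEL_VOP_LIT. | ||
| | */ | ||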
| 634 | ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val); | ||
| 635 | if (ret) | ||
| 636 | return; | ||
| 637 | |||
| 638 | mutex_lock(&dp->lock); | ||
| 639 | |||
| 640 | ret = cdn_dp_enable(dp); | ||
| 641 | if (ret) { | ||
| 642 | DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n", | ||
| 643 | ret); | ||
| 644 | goto out; | ||
| 645 | } | ||
| 646 | if (!cdn_dp_check_link_status(dp)) { | ||
| 647 | ret = cdn_dp_train_link(dp); | ||
| 648 | if (ret) { | ||
| 649 | DRM_DEV_ERROR(dp->dev, "Failed link train %d\n", ret); | ||
| 650 | goto out; | ||
| 651 | } | ||
| 652 | } | ||
| 653 | |||
| 654 | ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_IDLE); | ||
| 655 | if (ret) { | ||
| 656 | DRM_DEV_ERROR(dp->dev, "Failed to idle video %d\n", ret); | ||
| 657 | goto out; | ||
| 658 | } | ||
| 659 | |||
| 660 | ret = cdn_dp_config_video(dp); | ||
| 661 | if (ret) { | ||
| 662 | DRM_DEV_ERROR(dp->dev, "Failed to config video %d\n", ret); | ||
| 663 | goto out; | ||
| 664 | } | ||
| 665 | |||
| 666 | ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_VALID); | ||
| 667 | if (ret) { | ||
| 668 | DRM_DEV_ERROR(dp->dev, "Failed to set video valid %d\n", ret); | ||
| 669 | goto out; | ||
| 670 | } | ||
| 671 | out: | ||
| 672 | mutex_unlock(&dp->lock); | ||
| 673 | } | ||
| 674 | |||
| 675 | static void cdn_dp_encoder_disable(struct drm_encoder *encoder) | ||
| 676 | { | ||
| 677 | struct cdn_dp_device *dp = encoder_to_dp(encoder); | ||
| 678 | int ret; | ||
| 679 | |||
| 680 | mutex_lock(&dp->lock); | ||
| 681 | if (dp->active) { | ||
| 682 | ret = cdn_dp_disable(dp); | ||
| 683 | if (ret) { | ||
| 684 | DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n", | ||
| 685 | ret); | ||
| 686 | } | ||
| 687 | } | ||
| 688 | mutex_unlock(&dp->lock); | ||
| 689 | |||
| 690 | /* | ||
| 691 | * In the following 2 cases, we need to run the event_work to re-enable | ||
| 692 | * the DP: | ||
| 693 | * 1. If devices are connected on more than one port and one of them | ||
| 694 | * is removed, the DP will be disabled here; in that case, run the | ||
| 695 | * event_work to re-open the DP for the remaining port. | ||
| 696 | * 2. If re-training or re-configuration failed, the DP will be disabled | ||
| 697 | * here; run the event_work to re-connect it. | ||
| 698 | */ | ||
| 699 | if (!dp->connected && cdn_dp_connected_port(dp)) | ||
| 700 | schedule_work(&dp->event_work); | ||
| 701 | } | ||
| 702 | |||
| 703 | static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder, | ||
| 704 | struct drm_crtc_state *crtc_state, | ||
| 705 | struct drm_connector_state *conn_state) | ||
| 706 | { | ||
| 707 | struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state); | ||
| 708 | |||
| 709 | s->output_mode = ROCKCHIP_OUT_MODE_AAAA; | ||
| 710 | s->output_type = DRM_MODE_CONNECTOR_DisplayPort; | ||
| 711 | |||
| 712 | return 0; | ||
| 713 | } | ||
| 714 | |||
| 715 | static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = { | ||
| 716 | .mode_set = cdn_dp_encoder_mode_set, | ||
| 717 | .enable = cdn_dp_encoder_enable, | ||
| 718 | .disable = cdn_dp_encoder_disable, | ||
| 719 | .atomic_check = cdn_dp_encoder_atomic_check, | ||
| 720 | }; | ||
| 721 | |||
| 722 | static const struct drm_encoder_funcs cdn_dp_encoder_funcs = { | ||
| 723 | .destroy = drm_encoder_cleanup, | ||
| 724 | }; | ||
| 725 | |||
| 726 | static int cdn_dp_parse_dt(struct cdn_dp_device *dp) | ||
| 727 | { | ||
| 728 | struct device *dev = dp->dev; | ||
| 729 | struct device_node *np = dev->of_node; | ||
| 730 | struct platform_device *pdev = to_platform_device(dev); | ||
| 731 | struct resource *res; | ||
| 732 | |||
| 733 | dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf"); | ||
| 734 | if (IS_ERR(dp->grf)) { | ||
| 735 | DRM_DEV_ERROR(dev, "cdn-dp needs rockchip,grf property\n"); | ||
| 736 | return PTR_ERR(dp->grf); | ||
| 737 | } | ||
| 738 | |||
| 739 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 740 | dp->regs = devm_ioremap_resource(dev, res); | ||
| 741 | if (IS_ERR(dp->regs)) { | ||
| 742 | DRM_DEV_ERROR(dev, "ioremap reg failed\n"); | ||
| 743 | return PTR_ERR(dp->regs); | ||
| 744 | } | ||
| 745 | |||
| 746 | dp->core_clk = devm_clk_get(dev, "core-clk"); | ||
| 747 | if (IS_ERR(dp->core_clk)) { | ||
| 748 | DRM_DEV_ERROR(dev, "cannot get core_clk_dp\n"); | ||
| 749 | return PTR_ERR(dp->core_clk); | ||
| 750 | } | ||
| 751 | |||
| 752 | dp->pclk = devm_clk_get(dev, "pclk"); | ||
| 753 | if (IS_ERR(dp->pclk)) { | ||
| 754 | DRM_DEV_ERROR(dev, "cannot get pclk\n"); | ||
| 755 | return PTR_ERR(dp->pclk); | ||
| 756 | } | ||
| 757 | |||
| 758 | dp->spdif_clk = devm_clk_get(dev, "spdif"); | ||
| 759 | if (IS_ERR(dp->spdif_clk)) { | ||
| 760 | DRM_DEV_ERROR(dev, "cannot get spdif_clk\n"); | ||
| 761 | return PTR_ERR(dp->spdif_clk); | ||
| 762 | } | ||
| 763 | |||
| 764 | dp->grf_clk = devm_clk_get(dev, "grf"); | ||
| 765 | if (IS_ERR(dp->grf_clk)) { | ||
| 766 | DRM_DEV_ERROR(dev, "cannot get grf clk\n"); | ||
| 767 | return PTR_ERR(dp->grf_clk); | ||
| 768 | } | ||
| 769 | |||
| 770 | dp->spdif_rst = devm_reset_control_get(dev, "spdif"); | ||
| 771 | if (IS_ERR(dp->spdif_rst)) { | ||
| 772 | DRM_DEV_ERROR(dev, "no spdif reset control found\n"); | ||
| 773 | return PTR_ERR(dp->spdif_rst); | ||
| 774 | } | ||
| 775 | |||
| 776 | dp->dptx_rst = devm_reset_control_get(dev, "dptx"); | ||
| 777 | if (IS_ERR(dp->dptx_rst)) { | ||
| 778 | DRM_DEV_ERROR(dev, "no uphy reset control found\n"); | ||
| 779 | return PTR_ERR(dp->dptx_rst); | ||
| 780 | } | ||
| 781 | |||
| 782 | dp->core_rst = devm_reset_control_get(dev, "core"); | ||
| 783 | if (IS_ERR(dp->core_rst)) { | ||
| 784 | DRM_DEV_ERROR(dev, "no core reset control found\n"); | ||
| 785 | return PTR_ERR(dp->core_rst); | ||
| 786 | } | ||
| 787 | |||
| 788 | dp->apb_rst = devm_reset_control_get(dev, "apb"); | ||
| 789 | if (IS_ERR(dp->apb_rst)) { | ||
| 790 | DRM_DEV_ERROR(dev, "no apb reset control found\n"); | ||
| 791 | return PTR_ERR(dp->apb_rst); | ||
| 792 | } | ||
| 793 | |||
| 794 | return 0; | ||
| 795 | } | ||
| 796 | |||
| 797 | static int cdn_dp_audio_hw_params(struct device *dev, void *data, | ||
| 798 | struct hdmi_codec_daifmt *daifmt, | ||
| 799 | struct hdmi_codec_params *params) | ||
| 800 | { | ||
| 801 | struct cdn_dp_device *dp = dev_get_drvdata(dev); | ||
| 802 | struct audio_info audio = { | ||
| 803 | .sample_width = params->sample_width, | ||
| 804 | .sample_rate = params->sample_rate, | ||
| 805 | .channels = params->channels, | ||
| 806 | }; | ||
| 807 | int ret; | ||
| 808 | |||
| 809 | mutex_lock(&dp->lock); | ||
| 810 | if (!dp->active) { | ||
| 811 | ret = -ENODEV; | ||
| 812 | goto out; | ||
| 813 | } | ||
| 814 | |||
| 815 | switch (daifmt->fmt) { | ||
| 816 | case HDMI_I2S: | ||
| 817 | audio.format = AFMT_I2S; | ||
| 818 | break; | ||
| 819 | case HDMI_SPDIF: | ||
| 820 | audio.format = AFMT_SPDIF; | ||
| 821 | break; | ||
| 822 | default: | ||
| 823 | DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt); | ||
| 824 | ret = -EINVAL; | ||
| 825 | goto out; | ||
| 826 | } | ||
| 827 | |||
| 828 | ret = cdn_dp_audio_config(dp, &audio); | ||
| 829 | if (!ret) | ||
| 830 | dp->audio_info = audio; | ||
| 831 | |||
| 832 | out: | ||
| 833 | mutex_unlock(&dp->lock); | ||
| 834 | return ret; | ||
| 835 | } | ||
| 836 | |||
| 837 | static void cdn_dp_audio_shutdown(struct device *dev, void *data) | ||
| 838 | { | ||
| 839 | struct cdn_dp_device *dp = dev_get_drvdata(dev); | ||
| 840 | int ret; | ||
| 841 | |||
| 842 | mutex_lock(&dp->lock); | ||
| 843 | if (!dp->active) | ||
| 844 | goto out; | ||
| 845 | |||
| 846 | ret = cdn_dp_audio_stop(dp, &dp->audio_info); | ||
| 847 | if (!ret) | ||
| 848 | dp->audio_info.format = AFMT_UNUSED; | ||
| 849 | out: | ||
| 850 | mutex_unlock(&dp->lock); | ||
| 851 | } | ||
| 852 | |||
| 853 | static int cdn_dp_audio_digital_mute(struct device *dev, void *data, | ||
| 854 | bool enable) | ||
| 855 | { | ||
| 856 | struct cdn_dp_device *dp = dev_get_drvdata(dev); | ||
| 857 | int ret; | ||
| 858 | |||
| 859 | mutex_lock(&dp->lock); | ||
| 860 | if (!dp->active) { | ||
| 861 | ret = -ENODEV; | ||
| 862 | goto out; | ||
| 863 | } | ||
| 864 | |||
| 865 | ret = cdn_dp_audio_mute(dp, enable); | ||
| 866 | |||
| 867 | out: | ||
| 868 | mutex_unlock(&dp->lock); | ||
| 869 | return ret; | ||
| 870 | } | ||
| 871 | |||
| 872 | static int cdn_dp_audio_get_eld(struct device *dev, void *data, | ||
| 873 | u8 *buf, size_t len) | ||
| 874 | { | ||
| 875 | struct cdn_dp_device *dp = dev_get_drvdata(dev); | ||
| 876 | |||
| 877 | memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len)); | ||
| 878 | |||
| 879 | return 0; | ||
| 880 | } | ||
| 881 | |||
| 882 | static const struct hdmi_codec_ops audio_codec_ops = { | ||
| 883 | .hw_params = cdn_dp_audio_hw_params, | ||
| 884 | .audio_shutdown = cdn_dp_audio_shutdown, | ||
| 885 | .digital_mute = cdn_dp_audio_digital_mute, | ||
| 886 | .get_eld = cdn_dp_audio_get_eld, | ||
| 887 | }; | ||
| 888 | |||
| 889 | static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp, | ||
| 890 | struct device *dev) | ||
| 891 | { | ||
| 892 | struct hdmi_codec_pdata codec_data = { | ||
| 893 | .i2s = 1, | ||
| 894 | .spdif = 1, | ||
| 895 | .ops = &audio_codec_ops, | ||
| 896 | .max_i2s_channels = 8, | ||
| 897 | }; | ||
| 898 | |||
| 899 | dp->audio_pdev = platform_device_register_data( | ||
| 900 | dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO, | ||
| 901 | &codec_data, sizeof(codec_data)); | ||
| 902 | |||
| 903 | return PTR_ERR_OR_ZERO(dp->audio_pdev); | ||
| 904 | } | ||
| 905 | |||
| 906 | static int cdn_dp_request_firmware(struct cdn_dp_device *dp) | ||
| 907 | { | ||
| 908 | int ret; | ||
| 909 | unsigned long timeout = jiffies + msecs_to_jiffies(CDN_FW_TIMEOUT_MS); | ||
| 910 | unsigned long sleep = 1000; | ||
| 911 | |||
| 912 | WARN_ON(!mutex_is_locked(&dp->lock)); | ||
| 913 | |||
| 914 | if (dp->fw_loaded) | ||
| 915 | return 0; | ||
| 916 | |||
| 917 | /* Drop the lock before getting the firmware to avoid blocking boot */ | ||
| 918 | mutex_unlock(&dp->lock); | ||
| 919 | |||
| 920 | while (time_before(jiffies, timeout)) { | ||
| 921 | ret = request_firmware(&dp->fw, CDN_DP_FIRMWARE, dp->dev); | ||
| 922 | if (ret == -ENOENT) { | ||
| 923 | msleep(sleep); | ||
| 924 | sleep *= 2; | ||
| 925 | continue; | ||
| 926 | } else if (ret) { | ||
| 927 | DRM_DEV_ERROR(dp->dev, | ||
| 928 | "failed to request firmware: %d\n", ret); | ||
| 929 | goto out; | ||
| 930 | } | ||
| 931 | |||
| 932 | dp->fw_loaded = true; | ||
| 933 | ret = 0; | ||
| 934 | goto out; | ||
| 935 | } | ||
| 936 | |||
| 937 | DRM_DEV_ERROR(dp->dev, "Timed out trying to load firmware\n"); | ||
| 938 | ret = -ETIMEDOUT; | ||
| 939 | out: | ||
| 940 | mutex_lock(&dp->lock); | ||
| 941 | return ret; | ||
| 942 | } | ||
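| | /* | ||
| | * The -ENOENT retry above backs off exponentially (1s, 2s, 4s, ...), | ||
| | * so a firmware file that only becomes available once the rootfs is | ||
| | * mounted is still picked up within CDN_FW_TIMEOUT_MS without | ||
| | * polling the filesystem aggressively. | ||
| | */ | ||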
| 943 | |||
| 944 | static void cdn_dp_pd_event_work(struct work_struct *work) | ||
| 945 | { | ||
| 946 | struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device, | ||
| 947 | event_work); | ||
| 948 | struct drm_connector *connector = &dp->connector; | ||
| 949 | enum drm_connector_status old_status; | ||
| 950 | |||
| 951 | int ret; | ||
| 952 | |||
| 953 | mutex_lock(&dp->lock); | ||
| 954 | |||
| 955 | if (dp->suspended) | ||
| 956 | goto out; | ||
| 957 | |||
| 958 | ret = cdn_dp_request_firmware(dp); | ||
| 959 | if (ret) | ||
| 960 | goto out; | ||
| 961 | |||
| 962 | dp->connected = true; | ||
| 963 | |||
| 964 | /* Not connected, notify userspace to disable the block */ | ||
| 965 | if (!cdn_dp_connected_port(dp)) { | ||
| 966 | DRM_DEV_INFO(dp->dev, "Not connected. Disabling cdn\n"); | ||
| 967 | dp->connected = false; | ||
| 968 | |||
| 969 | /* Connected but not enabled, enable the block */ | ||
| 970 | } else if (!dp->active) { | ||
| 971 | DRM_DEV_INFO(dp->dev, "Connected, not enabled. Enabling cdn\n"); | ||
| 972 | ret = cdn_dp_enable(dp); | ||
| 973 | if (ret) { | ||
| 974 | DRM_DEV_ERROR(dp->dev, "Enable dp failed %d\n", ret); | ||
| 975 | dp->connected = false; | ||
| 976 | } | ||
| 977 | |||
| 978 | /* Enabled and connected to a dongle without a sink, notify userspace */ | ||
| 979 | } else if (!cdn_dp_check_sink_connection(dp)) { | ||
| 980 | DRM_DEV_INFO(dp->dev, "Connected without sink. Assert hpd\n"); | ||
| 981 | dp->connected = false; | ||
| 982 | |||
| 983 | /* Enabled and connected with a sink, re-train if requested */ | ||
| 984 | } else if (!cdn_dp_check_link_status(dp)) { | ||
| 985 | unsigned int rate = dp->link.rate; | ||
| 986 | unsigned int lanes = dp->link.num_lanes; | ||
| 987 | struct drm_display_mode *mode = &dp->mode; | ||
| 988 | |||
| 989 | DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n"); | ||
| 990 | ret = cdn_dp_train_link(dp); | ||
| 991 | if (ret) { | ||
| 992 | dp->connected = false; | ||
| 993 | DRM_DEV_ERROR(dp->dev, "Train link failed %d\n", ret); | ||
| 994 | goto out; | ||
| 995 | } | ||
| 996 | |||
| 997 | /* If training result is changed, update the video config */ | ||
| 998 | if (mode->clock && | ||
| 999 | (rate != dp->link.rate || lanes != dp->link.num_lanes)) { | ||
| 1000 | ret = cdn_dp_config_video(dp); | ||
| 1001 | if (ret) { | ||
| 1002 | dp->connected = false; | ||
| 1003 | DRM_DEV_ERROR(dp->dev, | ||
| 1004 | "Failed to config video %d\n", | ||
| 1005 | ret); | ||
| 1006 | } | ||
| 1007 | } | ||
| 1008 | } | ||
| 1009 | |||
| 1010 | out: | ||
| 1011 | mutex_unlock(&dp->lock); | ||
| 1012 | |||
| 1013 | old_status = connector->status; | ||
| 1014 | connector->status = connector->funcs->detect(connector, false); | ||
| 1015 | if (old_status != connector->status) | ||
| 1016 | drm_kms_helper_hotplug_event(dp->drm_dev); | ||
| 1017 | } | ||
| 1018 | |||
| 1019 | static int cdn_dp_pd_event(struct notifier_block *nb, | ||
| 1020 | unsigned long event, void *priv) | ||
| 1021 | { | ||
| 1022 | struct cdn_dp_port *port = container_of(nb, struct cdn_dp_port, | ||
| 1023 | event_nb); | ||
| 1024 | struct cdn_dp_device *dp = port->dp; | ||
| 1025 | |||
| 1026 | /* | ||
| 1027 | * It would be nice to be able to just do the work inline right here. | ||
| 1028 | * However, we need to make a bunch of calls that might sleep in order | ||
| 1029 | * to turn on the block/phy, so use a worker instead. | ||
| 1030 | */ | ||
| 1031 | schedule_work(&dp->event_work); | ||
| 1032 | |||
| 1033 | return NOTIFY_DONE; | ||
| 1034 | } | ||
| 1035 | |||
| 1036 | static int cdn_dp_bind(struct device *dev, struct device *master, void *data) | ||
| 1037 | { | ||
| 1038 | struct cdn_dp_device *dp = dev_get_drvdata(dev); | ||
| 1039 | struct drm_encoder *encoder; | ||
| 1040 | struct drm_connector *connector; | ||
| 1041 | struct cdn_dp_port *port; | ||
| 1042 | struct drm_device *drm_dev = data; | ||
| 1043 | int ret, i; | ||
| 1044 | |||
| 1045 | ret = cdn_dp_parse_dt(dp); | ||
| 1046 | if (ret < 0) | ||
| 1047 | return ret; | ||
| 1048 | |||
| 1049 | dp->drm_dev = drm_dev; | ||
| 1050 | dp->connected = false; | ||
| 1051 | dp->active = false; | ||
| 1052 | dp->active_port = -1; | ||
| 1053 | |||
| 1054 | INIT_WORK(&dp->event_work, cdn_dp_pd_event_work); | ||
| 1055 | |||
| 1056 | encoder = &dp->encoder; | ||
| 1057 | |||
| 1058 | encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev, | ||
| 1059 | dev->of_node); | ||
| 1060 | DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); | ||
| 1061 | |||
| 1062 | ret = drm_encoder_init(drm_dev, encoder, &cdn_dp_encoder_funcs, | ||
| 1063 | DRM_MODE_ENCODER_TMDS, NULL); | ||
| 1064 | if (ret) { | ||
| 1065 | DRM_ERROR("failed to initialize encoder with drm\n"); | ||
| 1066 | return ret; | ||
| 1067 | } | ||
| 1068 | |||
| 1069 | drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs); | ||
| 1070 | |||
| 1071 | connector = &dp->connector; | ||
| 1072 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
| 1073 | connector->dpms = DRM_MODE_DPMS_OFF; | ||
| 1074 | |||
| 1075 | ret = drm_connector_init(drm_dev, connector, | ||
| 1076 | &cdn_dp_atomic_connector_funcs, | ||
| 1077 | DRM_MODE_CONNECTOR_DisplayPort); | ||
| 1078 | if (ret) { | ||
| 1079 | DRM_ERROR("failed to initialize connector with drm\n"); | ||
| 1080 | goto err_free_encoder; | ||
| 1081 | } | ||
| 1082 | |||
| 1083 | drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs); | ||
| 1084 | |||
| 1085 | ret = drm_mode_connector_attach_encoder(connector, encoder); | ||
| 1086 | if (ret) { | ||
| 1087 | DRM_ERROR("failed to attach connector and encoder\n"); | ||
| 1088 | goto err_free_connector; | ||
| 1089 | } | ||
| 1090 | |||
| 1091 | cdn_dp_audio_codec_init(dp, dev); | ||
| 1092 | |||
| 1093 | for (i = 0; i < dp->ports; i++) { | ||
| 1094 | port = dp->port[i]; | ||
| 1095 | |||
| 1096 | port->event_nb.notifier_call = cdn_dp_pd_event; | ||
| 1097 | ret = devm_extcon_register_notifier(dp->dev, port->extcon, | ||
| 1098 | EXTCON_DISP_DP, | ||
| 1099 | &port->event_nb); | ||
| 1100 | if (ret) { | ||
| 1101 | DRM_DEV_ERROR(dev, | ||
| 1102 | "register EXTCON_DISP_DP notifier err\n"); | ||
| 1103 | goto err_free_connector; | ||
| 1104 | } | ||
| 1105 | } | ||
| 1106 | |||
| 1107 | pm_runtime_enable(dev); | ||
| 1108 | |||
| 1109 | schedule_work(&dp->event_work); | ||
| 1110 | |||
| 1111 | return 0; | ||
| 1112 | |||
| 1113 | err_free_connector: | ||
| 1114 | drm_connector_cleanup(connector); | ||
| 1115 | err_free_encoder: | ||
| 1116 | drm_encoder_cleanup(encoder); | ||
| 1117 | return ret; | ||
| 1118 | } | ||
| 1119 | |||
| 1120 | static void cdn_dp_unbind(struct device *dev, struct device *master, void *data) | ||
| 1121 | { | ||
| 1122 | struct cdn_dp_device *dp = dev_get_drvdata(dev); | ||
| 1123 | struct drm_encoder *encoder = &dp->encoder; | ||
| 1124 | struct drm_connector *connector = &dp->connector; | ||
| 1125 | |||
| 1126 | cancel_work_sync(&dp->event_work); | ||
| 1127 | platform_device_unregister(dp->audio_pdev); | ||
| 1128 | cdn_dp_encoder_disable(encoder); | ||
| 1129 | encoder->funcs->destroy(encoder); | ||
| 1130 | connector->funcs->destroy(connector); | ||
| 1131 | |||
| 1132 | pm_runtime_disable(dev); | ||
| 1133 | release_firmware(dp->fw); | ||
| 1134 | kfree(dp->edid); | ||
| 1135 | dp->edid = NULL; | ||
| 1136 | } | ||
| 1137 | |||
| 1138 | static const struct component_ops cdn_dp_component_ops = { | ||
| 1139 | .bind = cdn_dp_bind, | ||
| 1140 | .unbind = cdn_dp_unbind, | ||
| 1141 | }; | ||
| 1142 | |||
| 1143 | int cdn_dp_suspend(struct device *dev) | ||
| 1144 | { | ||
| 1145 | struct cdn_dp_device *dp = dev_get_drvdata(dev); | ||
| 1146 | int ret = 0; | ||
| 1147 | |||
| 1148 | mutex_lock(&dp->lock); | ||
| 1149 | if (dp->active) | ||
| 1150 | ret = cdn_dp_disable(dp); | ||
| 1151 | dp->suspended = true; | ||
| 1152 | mutex_unlock(&dp->lock); | ||
| 1153 | |||
| 1154 | return ret; | ||
| 1155 | } | ||
| 1156 | |||
| 1157 | int cdn_dp_resume(struct device *dev) | ||
| 1158 | { | ||
| 1159 | struct cdn_dp_device *dp = dev_get_drvdata(dev); | ||
| 1160 | |||
| 1161 | mutex_lock(&dp->lock); | ||
| 1162 | dp->suspended = false; | ||
| 1163 | if (dp->fw_loaded) | ||
| 1164 | schedule_work(&dp->event_work); | ||
| 1165 | mutex_unlock(&dp->lock); | ||
| 1166 | |||
| 1167 | return 0; | ||
| 1168 | } | ||
| 1169 | |||
| 1170 | static int cdn_dp_probe(struct platform_device *pdev) | ||
| 1171 | { | ||
| 1172 | struct device *dev = &pdev->dev; | ||
| 1173 | const struct of_device_id *match; | ||
| 1174 | struct cdn_dp_data *dp_data; | ||
| 1175 | struct cdn_dp_port *port; | ||
| 1176 | struct cdn_dp_device *dp; | ||
| 1177 | struct extcon_dev *extcon; | ||
| 1178 | struct phy *phy; | ||
| 1179 | int i; | ||
| 1180 | |||
| 1181 | dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL); | ||
| 1182 | if (!dp) | ||
| 1183 | return -ENOMEM; | ||
| 1184 | dp->dev = dev; | ||
| 1185 | |||
| 1186 | match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node); | ||
| 1187 | dp_data = (struct cdn_dp_data *)match->data; | ||
| 1188 | |||
| 1189 | for (i = 0; i < dp_data->max_phy; i++) { | ||
| 1190 | extcon = extcon_get_edev_by_phandle(dev, i); | ||
| 1191 | phy = devm_of_phy_get_by_index(dev, dev->of_node, i); | ||
| 1192 | |||
| 1193 | if (PTR_ERR(extcon) == -EPROBE_DEFER || | ||
| 1194 | PTR_ERR(phy) == -EPROBE_DEFER) | ||
| 1195 | return -EPROBE_DEFER; | ||
| 1196 | |||
| 1197 | if (IS_ERR(extcon) || IS_ERR(phy)) | ||
| 1198 | continue; | ||
| 1199 | |||
| 1200 | port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); | ||
| 1201 | if (!port) | ||
| 1202 | return -ENOMEM; | ||
| 1203 | |||
| 1204 | port->extcon = extcon; | ||
| 1205 | port->phy = phy; | ||
| 1206 | port->dp = dp; | ||
| 1207 | port->id = i; | ||
| 1208 | dp->port[dp->ports++] = port; | ||
| 1209 | } | ||
| 1210 | |||
| 1211 | if (!dp->ports) { | ||
| 1212 | DRM_DEV_ERROR(dev, "missing extcon or phy\n"); | ||
| 1213 | return -EINVAL; | ||
| 1214 | } | ||
| 1215 | |||
| 1216 | mutex_init(&dp->lock); | ||
| 1217 | dev_set_drvdata(dev, dp); | ||
| 1218 | |||
| 1219 | return component_add(dev, &cdn_dp_component_ops); | ||
| 1220 | } | ||
| 1221 | |||
| 1222 | static int cdn_dp_remove(struct platform_device *pdev) | ||
| 1223 | { | ||
| 1224 | struct cdn_dp_device *dp = platform_get_drvdata(pdev); | ||
| 1225 | |||
| 1226 | cdn_dp_suspend(dp->dev); | ||
| 1227 | component_del(&pdev->dev, &cdn_dp_component_ops); | ||
| 1228 | |||
| 1229 | return 0; | ||
| 1230 | } | ||
| 1231 | |||
| 1232 | static void cdn_dp_shutdown(struct platform_device *pdev) | ||
| 1233 | { | ||
| 1234 | struct cdn_dp_device *dp = platform_get_drvdata(pdev); | ||
| 1235 | |||
| 1236 | cdn_dp_suspend(dp->dev); | ||
| 1237 | } | ||
| 1238 | |||
| 1239 | static const struct dev_pm_ops cdn_dp_pm_ops = { | ||
| 1240 | SET_SYSTEM_SLEEP_PM_OPS(cdn_dp_suspend, | ||
| 1241 | cdn_dp_resume) | ||
| 1242 | }; | ||
| 1243 | |||
| 1244 | static struct platform_driver cdn_dp_driver = { | ||
| 1245 | .probe = cdn_dp_probe, | ||
| 1246 | .remove = cdn_dp_remove, | ||
| 1247 | .shutdown = cdn_dp_shutdown, | ||
| 1248 | .driver = { | ||
| 1249 | .name = "cdn-dp", | ||
| 1250 | .owner = THIS_MODULE, | ||
| 1251 | .of_match_table = of_match_ptr(cdn_dp_dt_ids), | ||
| 1252 | .pm = &cdn_dp_pm_ops, | ||
| 1253 | }, | ||
| 1254 | }; | ||
| 1255 | |||
| 1256 | module_platform_driver(cdn_dp_driver); | ||
| 1257 | |||
| 1258 | MODULE_AUTHOR("Chris Zhong <zyw@rock-chips.com>"); | ||
| 1259 | MODULE_DESCRIPTION("cdn DP Driver"); | ||
| 1260 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h new file mode 100644 index 000000000000..f57e296401b8 --- /dev/null +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h | |||
| @@ -0,0 +1,112 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2016 Chris Zhong <zyw@rock-chips.com> | ||
| 3 | * Copyright (C) 2016 ROCKCHIP, Inc. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify | ||
| 6 | * it under the terms of the GNU General Public License as published by | ||
| 7 | * the Free Software Foundation; either version 2 of the License. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | */ | ||
| 14 | |||
| 15 | #ifndef _CDN_DP_CORE_H | ||
| 16 | #define _CDN_DP_CORE_H | ||
| 17 | |||
| 18 | #include <drm/drmP.h> | ||
| 19 | #include <drm/drm_crtc_helper.h> | ||
| 20 | #include <drm/drm_dp_helper.h> | ||
| 21 | #include <drm/drm_panel.h> | ||
| 22 | #include "rockchip_drm_drv.h" | ||
| 23 | |||
| 24 | #define MAX_PHY 2 | ||
| 25 | |||
| 26 | enum audio_format { | ||
| 27 | AFMT_I2S = 0, | ||
| 28 | AFMT_SPDIF = 1, | ||
| 29 | AFMT_UNUSED, | ||
| 30 | }; | ||
| 31 | |||
| 32 | struct audio_info { | ||
| 33 | enum audio_format format; | ||
| 34 | int sample_rate; | ||
| 35 | int channels; | ||
| 36 | int sample_width; | ||
| 37 | }; | ||
| 38 | |||
| 39 | enum vic_pxl_encoding_format { | ||
| 40 | PXL_RGB = 0x1, | ||
| 41 | YCBCR_4_4_4 = 0x2, | ||
| 42 | YCBCR_4_2_2 = 0x4, | ||
| 43 | YCBCR_4_2_0 = 0x8, | ||
| 44 | Y_ONLY = 0x10, | ||
| 45 | }; | ||
| 46 | |||
| 47 | struct video_info { | ||
| 48 | bool h_sync_polarity; | ||
| 49 | bool v_sync_polarity; | ||
| 50 | bool interlaced; | ||
| 51 | int color_depth; | ||
| 52 | enum vic_pxl_encoding_format color_fmt; | ||
| 53 | }; | ||
| 54 | |||
| 55 | struct cdn_firmware_header { | ||
| 56 | u32 size_bytes; /* size of the entire header+image(s) in bytes */ | ||
| 57 | u32 header_size; /* size of just the header in bytes */ | ||
| 58 | u32 iram_size; /* size of iram */ | ||
| 59 | u32 dram_size; /* size of dram */ | ||
| 60 | }; | ||
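| | /* | ||
| | * Assumed image layout, inferred from these fields: the header | ||
| | * itself (header_size bytes), immediately followed by iram_size | ||
| | * bytes of IRAM image and dram_size bytes of DRAM image, with | ||
| | * size_bytes covering the whole file. | ||
| | */ | ||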
| 61 | |||
| 62 | struct cdn_dp_port { | ||
| 63 | struct cdn_dp_device *dp; | ||
| 64 | struct notifier_block event_nb; | ||
| 65 | struct extcon_dev *extcon; | ||
| 66 | struct phy *phy; | ||
| 67 | u8 lanes; | ||
| 68 | bool phy_enabled; | ||
| 69 | u8 id; | ||
| 70 | }; | ||
| 71 | |||
| 72 | struct cdn_dp_device { | ||
| 73 | struct device *dev; | ||
| 74 | struct drm_device *drm_dev; | ||
| 75 | struct drm_connector connector; | ||
| 76 | struct drm_encoder encoder; | ||
| 77 | struct drm_display_mode mode; | ||
| 78 | struct platform_device *audio_pdev; | ||
| 79 | struct work_struct event_work; | ||
| 80 | struct edid *edid; | ||
| 81 | |||
| 82 | struct mutex lock; | ||
| 83 | bool connected; | ||
| 84 | bool active; | ||
| 85 | bool suspended; | ||
| 86 | |||
| 87 | const struct firmware *fw; /* cdn dp firmware */ | ||
| 88 | unsigned int fw_version; /* cdn fw version */ | ||
| 89 | bool fw_loaded; | ||
| 90 | |||
| 91 | void __iomem *regs; | ||
| 92 | struct regmap *grf; | ||
| 93 | struct clk *core_clk; | ||
| 94 | struct clk *pclk; | ||
| 95 | struct clk *spdif_clk; | ||
| 96 | struct clk *grf_clk; | ||
| 97 | struct reset_control *spdif_rst; | ||
| 98 | struct reset_control *dptx_rst; | ||
| 99 | struct reset_control *apb_rst; | ||
| 100 | struct reset_control *core_rst; | ||
| 101 | struct audio_info audio_info; | ||
| 102 | struct video_info video_info; | ||
| 103 | struct drm_dp_link link; | ||
| 104 | struct cdn_dp_port *port[MAX_PHY]; | ||
| 105 | u8 ports; | ||
| 106 | u8 lanes; | ||
| 107 | int active_port; | ||
| 108 | |||
| 109 | u8 dpcd[DP_RECEIVER_CAP_SIZE]; | ||
| 110 | bool sink_has_audio; | ||
| 111 | }; | ||
| 112 | #endif /* _CDN_DP_CORE_H */ | ||
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c new file mode 100644 index 000000000000..319dbbaa3609 --- /dev/null +++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c | |||
| @@ -0,0 +1,979 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd | ||
| 3 | * Author: Chris Zhong <zyw@rock-chips.com> | ||
| 4 | * | ||
| 5 | * This software is licensed under the terms of the GNU General Public | ||
| 6 | * License version 2, as published by the Free Software Foundation, and | ||
| 7 | * may be copied, distributed, and modified under those terms. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | */ | ||
| 14 | |||
| 15 | #include <linux/clk.h> | ||
| 16 | #include <linux/device.h> | ||
| 17 | #include <linux/delay.h> | ||
| 18 | #include <linux/io.h> | ||
| 19 | #include <linux/iopoll.h> | ||
| 20 | #include <linux/reset.h> | ||
| 21 | |||
| 22 | #include "cdn-dp-core.h" | ||
| 23 | #include "cdn-dp-reg.h" | ||
| 24 | |||
| 25 | #define CDN_DP_SPDIF_CLK 200000000 | ||
| 26 | #define FW_ALIVE_TIMEOUT_US 1000000 | ||
| 27 | #define MAILBOX_RETRY_US 1000 | ||
| 28 | #define MAILBOX_TIMEOUT_US 5000000 | ||
| 29 | #define LINK_TRAINING_RETRY_MS 20 | ||
| 30 | #define LINK_TRAINING_TIMEOUT_MS 500 | ||
| 31 | |||
| 32 | void cdn_dp_set_fw_clk(struct cdn_dp_device *dp, u32 clk) | ||
| 33 | { | ||
| 34 | writel(clk / 1000000, dp->regs + SW_CLK_H); | ||
| 35 | } | ||
| 36 | |||
| 37 | void cdn_dp_clock_reset(struct cdn_dp_device *dp) | ||
| 38 | { | ||
| 39 | u32 val; | ||
| 40 | |||
| 41 | val = DPTX_FRMR_DATA_CLK_RSTN_EN | | ||
| 42 | DPTX_FRMR_DATA_CLK_EN | | ||
| 43 | DPTX_PHY_DATA_RSTN_EN | | ||
| 44 | DPTX_PHY_DATA_CLK_EN | | ||
| 45 | DPTX_PHY_CHAR_RSTN_EN | | ||
| 46 | DPTX_PHY_CHAR_CLK_EN | | ||
| 47 | SOURCE_AUX_SYS_CLK_RSTN_EN | | ||
| 48 | SOURCE_AUX_SYS_CLK_EN | | ||
| 49 | DPTX_SYS_CLK_RSTN_EN | | ||
| 50 | DPTX_SYS_CLK_EN | | ||
| 51 | CFG_DPTX_VIF_CLK_RSTN_EN | | ||
| 52 | CFG_DPTX_VIF_CLK_EN; | ||
| 53 | writel(val, dp->regs + SOURCE_DPTX_CAR); | ||
| 54 | |||
| 55 | val = SOURCE_PHY_RSTN_EN | SOURCE_PHY_CLK_EN; | ||
| 56 | writel(val, dp->regs + SOURCE_PHY_CAR); | ||
| 57 | |||
| 58 | val = SOURCE_PKT_SYS_RSTN_EN | | ||
| 59 | SOURCE_PKT_SYS_CLK_EN | | ||
| 60 | SOURCE_PKT_DATA_RSTN_EN | | ||
| 61 | SOURCE_PKT_DATA_CLK_EN; | ||
| 62 | writel(val, dp->regs + SOURCE_PKT_CAR); | ||
| 63 | |||
| 64 | val = SPDIF_CDR_CLK_RSTN_EN | | ||
| 65 | SPDIF_CDR_CLK_EN | | ||
| 66 | SOURCE_AIF_SYS_RSTN_EN | | ||
| 67 | SOURCE_AIF_SYS_CLK_EN | | ||
| 68 | SOURCE_AIF_CLK_RSTN_EN | | ||
| 69 | SOURCE_AIF_CLK_EN; | ||
| 70 | writel(val, dp->regs + SOURCE_AIF_CAR); | ||
| 71 | |||
| 72 | val = SOURCE_CIPHER_SYSTEM_CLK_RSTN_EN | | ||
| 73 | SOURCE_CIPHER_SYS_CLK_EN | | ||
| 74 | SOURCE_CIPHER_CHAR_CLK_RSTN_EN | | ||
| 75 | SOURCE_CIPHER_CHAR_CLK_EN; | ||
| 76 | writel(val, dp->regs + SOURCE_CIPHER_CAR); | ||
| 77 | |||
| 78 | val = SOURCE_CRYPTO_SYS_CLK_RSTN_EN | | ||
| 79 | SOURCE_CRYPTO_SYS_CLK_EN; | ||
| 80 | writel(val, dp->regs + SOURCE_CRYPTO_CAR); | ||
| 81 | |||
| 82 | /* enable Mailbox and PIF interrupt */ | ||
| 83 | writel(0, dp->regs + APB_INT_MASK); | ||
| 84 | } | ||
| 85 | |||
| 86 | static int cdn_dp_mailbox_read(struct cdn_dp_device *dp) | ||
| 87 | { | ||
| 88 | int val, ret; | ||
| 89 | |||
| 90 | ret = readx_poll_timeout(readl, dp->regs + MAILBOX_EMPTY_ADDR, | ||
| 91 | val, !val, MAILBOX_RETRY_US, | ||
| 92 | MAILBOX_TIMEOUT_US); | ||
| 93 | if (ret < 0) | ||
| 94 | return ret; | ||
| 95 | |||
| 96 | return readl(dp->regs + MAILBOX0_RD_DATA) & 0xff; | ||
| 97 | } | ||
| 98 | |||
| 99 | static int cdp_dp_mailbox_write(struct cdn_dp_device *dp, u8 val) | ||
| 100 | { | ||
| 101 | int ret, full; | ||
| 102 | |||
| 103 | ret = readx_poll_timeout(readl, dp->regs + MAILBOX_FULL_ADDR, | ||
| 104 | full, !full, MAILBOX_RETRY_US, | ||
| 105 | MAILBOX_TIMEOUT_US); | ||
| 106 | if (ret < 0) | ||
| 107 | return ret; | ||
| 108 | |||
| 109 | writel(val, dp->regs + MAILBOX0_WR_DATA); | ||
| 110 | |||
| 111 | return 0; | ||
| 112 | } | ||
| 113 | |||
| 114 | static int cdn_dp_mailbox_validate_receive(struct cdn_dp_device *dp, | ||
| 115 | u8 module_id, u8 opcode, | ||
| 116 | u8 req_size) | ||
| 117 | { | ||
| 118 | u32 mbox_size, i; | ||
| 119 | u8 header[4]; | ||
| 120 | int ret; | ||
| 121 | |||
| 122 | /* read the header of the message */ | ||
| 123 | for (i = 0; i < 4; i++) { | ||
| 124 | ret = cdn_dp_mailbox_read(dp); | ||
| 125 | if (ret < 0) | ||
| 126 | return ret; | ||
| 127 | |||
| 128 | header[i] = ret; | ||
| 129 | } | ||
| 130 | |||
| 131 | mbox_size = (header[2] << 8) | header[3]; | ||
| 132 | |||
| 133 | if (opcode != header[0] || module_id != header[1] || | ||
| 134 | req_size != mbox_size) { | ||
| 135 | /* | ||
| 136 | * If the message in the mailbox is not what we want, we need to | ||
| 137 | * clear the mailbox by reading its contents. | ||
| 138 | */ | ||
| 139 | for (i = 0; i < mbox_size; i++) | ||
| 140 | if (cdn_dp_mailbox_read(dp) < 0) | ||
| 141 | break; | ||
| 142 | |||
| 143 | return -EINVAL; | ||
| 144 | } | ||
| 145 | |||
| 146 | return 0; | ||
| 147 | } | ||
| 148 | |||
| 149 | static int cdn_dp_mailbox_read_receive(struct cdn_dp_device *dp, | ||
| 150 | u8 *buff, u8 buff_size) | ||
| 151 | { | ||
| 152 | u32 i; | ||
| 153 | int ret; | ||
| 154 | |||
| 155 | for (i = 0; i < buff_size; i++) { | ||
| 156 | ret = cdn_dp_mailbox_read(dp); | ||
| 157 | if (ret < 0) | ||
| 158 | return ret; | ||
| 159 | |||
| 160 | buff[i] = ret; | ||
| 161 | } | ||
| 162 | |||
| 163 | return 0; | ||
| 164 | } | ||
| 165 | |||
| 166 | static int cdn_dp_mailbox_send(struct cdn_dp_device *dp, u8 module_id, | ||
| 167 | u8 opcode, u16 size, u8 *message) | ||
| 168 | { | ||
| 169 | u8 header[4]; | ||
| 170 | int ret, i; | ||
| 171 | |||
| 172 | header[0] = opcode; | ||
| 173 | header[1] = module_id; | ||
| 174 | header[2] = (size >> 8) & 0xff; | ||
| 175 | header[3] = size & 0xff; | ||
| 176 | |||
| 177 | for (i = 0; i < 4; i++) { | ||
| 178 | ret = cdp_dp_mailbox_write(dp, header[i]); | ||
| 179 | if (ret) | ||
| 180 | return ret; | ||
| 181 | } | ||
| 182 | |||
| 183 | for (i = 0; i < size; i++) { | ||
| 184 | ret = cdp_dp_mailbox_write(dp, message[i]); | ||
| 185 | if (ret) | ||
| 186 | return ret; | ||
| 187 | } | ||
| 188 | |||
| 189 | return 0; | ||
| 190 | } | ||
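| | /* | ||
| | * A sketch of the framing implemented above: every mailbox message | ||
| | * starts with a 4-byte header of opcode, module id, and the payload | ||
| | * size as a big-endian u16. A DPTX_WRITE_REGISTER request with a | ||
| | * 6-byte payload, for instance, goes out as { DPTX_WRITE_REGISTER, | ||
| | * MB_MODULE_ID_DP_TX, 0x00, 0x06 } followed by the six payload | ||
| | * bytes, one mailbox write each. | ||
| | */ | ||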
| 191 | |||
| 192 | static int cdn_dp_reg_write(struct cdn_dp_device *dp, u16 addr, u32 val) | ||
| 193 | { | ||
| 194 | u8 msg[6]; | ||
| 195 | |||
| 196 | msg[0] = (addr >> 8) & 0xff; | ||
| 197 | msg[1] = addr & 0xff; | ||
| 198 | msg[2] = (val >> 24) & 0xff; | ||
| 199 | msg[3] = (val >> 16) & 0xff; | ||
| 200 | msg[4] = (val >> 8) & 0xff; | ||
| 201 | msg[5] = val & 0xff; | ||
| 202 | return cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_WRITE_REGISTER, | ||
| 203 | sizeof(msg), msg); | ||
| 204 | } | ||
| 205 | |||
| 206 | static int cdn_dp_reg_write_bit(struct cdn_dp_device *dp, u16 addr, | ||
| 207 | u8 start_bit, u8 bits_no, u32 val) | ||
| 208 | { | ||
| 209 | u8 field[8]; | ||
| 210 | |||
| 211 | field[0] = (addr >> 8) & 0xff; | ||
| 212 | field[1] = addr & 0xff; | ||
| 213 | field[2] = start_bit; | ||
| 214 | field[3] = bits_no; | ||
| 215 | field[4] = (val >> 24) & 0xff; | ||
| 216 | field[5] = (val >> 16) & 0xff; | ||
| 217 | field[6] = (val >> 8) & 0xff; | ||
| 218 | field[7] = val & 0xff; | ||
| 219 | |||
| 220 | return cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_WRITE_FIELD, | ||
| 221 | sizeof(field), field); | ||
| 222 | } | ||
| 223 | |||
| 224 | int cdn_dp_dpcd_read(struct cdn_dp_device *dp, u32 addr, u8 *data, u16 len) | ||
| 225 | { | ||
| 226 | u8 msg[5], reg[5]; | ||
| 227 | int ret; | ||
| 228 | |||
| 229 | msg[0] = (len >> 8) & 0xff; | ||
| 230 | msg[1] = len & 0xff; | ||
| 231 | msg[2] = (addr >> 16) & 0xff; | ||
| 232 | msg[3] = (addr >> 8) & 0xff; | ||
| 233 | msg[4] = addr & 0xff; | ||
| 234 | ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_READ_DPCD, | ||
| 235 | sizeof(msg), msg); | ||
| 236 | if (ret) | ||
| 237 | goto err_dpcd_read; | ||
| 238 | |||
| 239 | ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX, | ||
| 240 | DPTX_READ_DPCD, | ||
| 241 | sizeof(reg) + len); | ||
| 242 | if (ret) | ||
| 243 | goto err_dpcd_read; | ||
| 244 | |||
| 245 | ret = cdn_dp_mailbox_read_receive(dp, reg, sizeof(reg)); | ||
| 246 | if (ret) | ||
| 247 | goto err_dpcd_read; | ||
| 248 | |||
| 249 | ret = cdn_dp_mailbox_read_receive(dp, data, len); | ||
| 250 | |||
| 251 | err_dpcd_read: | ||
| 252 | return ret; | ||
| 253 | } | ||
| 254 | |||
| 255 | int cdn_dp_dpcd_write(struct cdn_dp_device *dp, u32 addr, u8 value) | ||
| 256 | { | ||
| 257 | u8 msg[6], reg[5]; | ||
| 258 | int ret; | ||
| 259 | |||
| 260 | msg[0] = 0; | ||
| 261 | msg[1] = 1; | ||
| 262 | msg[2] = (addr >> 16) & 0xff; | ||
| 263 | msg[3] = (addr >> 8) & 0xff; | ||
| 264 | msg[4] = addr & 0xff; | ||
| 265 | msg[5] = value; | ||
| 266 | ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_WRITE_DPCD, | ||
| 267 | sizeof(msg), msg); | ||
| 268 | if (ret) | ||
| 269 | goto err_dpcd_write; | ||
| 270 | |||
| 271 | ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX, | ||
| 272 | DPTX_WRITE_DPCD, sizeof(reg)); | ||
| 273 | if (ret) | ||
| 274 | goto err_dpcd_write; | ||
| 275 | |||
| 276 | ret = cdn_dp_mailbox_read_receive(dp, reg, sizeof(reg)); | ||
| 277 | if (ret) | ||
| 278 | goto err_dpcd_write; | ||
| 279 | |||
| 280 | if (addr != (reg[2] << 16 | reg[3] << 8 | reg[4])) | ||
| 281 | ret = -EINVAL; | ||
| 282 | |||
| 283 | err_dpcd_write: | ||
| 284 | if (ret) | ||
| 285 | DRM_DEV_ERROR(dp->dev, "dpcd write failed: %d\n", ret); | ||
| 286 | return ret; | ||
| 287 | } | ||
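| | /* | ||
| | * Worked example: writing 0x01 to DPCD address DP_SET_POWER (0x600) | ||
| | * sends the payload { 0x00, 0x01, 0x00, 0x06, 0x00, 0x01 }, i.e. a | ||
| | * length of 1, the 24-bit address, then the value; the firmware | ||
| | * echoes the address back in reg[2..4], which the check above uses | ||
| | * to confirm the write landed. | ||
| | */ | ||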
| 288 | |||
| 289 | int cdn_dp_load_firmware(struct cdn_dp_device *dp, const u32 *i_mem, | ||
| 290 | u32 i_size, const u32 *d_mem, u32 d_size) | ||
| 291 | { | ||
| 292 | u32 reg; | ||
| 293 | int i, ret; | ||
| 294 | |||
| 295 | /* reset ucpu before loading the firmware */ | ||
| 296 | writel(APB_IRAM_PATH | APB_DRAM_PATH | APB_XT_RESET, | ||
| 297 | dp->regs + APB_CTRL); | ||
| 298 | |||
| 299 | for (i = 0; i < i_size; i += 4) | ||
| 300 | writel(*i_mem++, dp->regs + ADDR_IMEM + i); | ||
| 301 | |||
| 302 | for (i = 0; i < d_size; i += 4) | ||
| 303 | writel(*d_mem++, dp->regs + ADDR_DMEM + i); | ||
| 304 | |||
| 305 | /* un-reset ucpu */ | ||
| 306 | writel(0, dp->regs + APB_CTRL); | ||
| 307 | |||
| 308 | /* check the keep-alive register to make sure the fw is working */ | ||
| 309 | ret = readx_poll_timeout(readl, dp->regs + KEEP_ALIVE, | ||
| 310 | reg, reg, 2000, FW_ALIVE_TIMEOUT_US); | ||
| 311 | if (ret < 0) { | ||
| 312 | DRM_DEV_ERROR(dp->dev, "failed to load the FW, reg = %x\n", | ||
| 313 | reg); | ||
| 314 | return -EINVAL; | ||
| 315 | } | ||
| 316 | |||
| 317 | reg = readl(dp->regs + VER_L) & 0xff; | ||
| 318 | dp->fw_version = reg; | ||
| 319 | reg = readl(dp->regs + VER_H) & 0xff; | ||
| 320 | dp->fw_version |= reg << 8; | ||
| 321 | reg = readl(dp->regs + VER_LIB_L_ADDR) & 0xff; | ||
| 322 | dp->fw_version |= reg << 16; | ||
| 323 | reg = readl(dp->regs + VER_LIB_H_ADDR) & 0xff; | ||
| 324 | dp->fw_version |= reg << 24; | ||
| 325 | |||
| 326 | dev_dbg(dp->dev, "firmware version: %x\n", dp->fw_version); | ||
| 327 | |||
| 328 | return 0; | ||
| 329 | } | ||
| 330 | |||
| 331 | int cdn_dp_set_firmware_active(struct cdn_dp_device *dp, bool enable) | ||
| 332 | { | ||
| 333 | u8 msg[5]; | ||
| 334 | int ret, i; | ||
| 335 | |||
| 336 | msg[0] = GENERAL_MAIN_CONTROL; | ||
| 337 | msg[1] = MB_MODULE_ID_GENERAL; | ||
| 338 | msg[2] = 0; | ||
| 339 | msg[3] = 1; | ||
| 340 | msg[4] = enable ? FW_ACTIVE : FW_STANDBY; | ||
| 341 | |||
| 342 | for (i = 0; i < sizeof(msg); i++) { | ||
| 343 | ret = cdp_dp_mailbox_write(dp, msg[i]); | ||
| 344 | if (ret) | ||
| 345 | goto err_set_firmware_active; | ||
| 346 | } | ||
| 347 | |||
| 348 | /* read the firmware state */ | ||
| 349 | for (i = 0; i < sizeof(msg); i++) { | ||
| 350 | ret = cdn_dp_mailbox_read(dp); | ||
| 351 | if (ret < 0) | ||
| 352 | goto err_set_firmware_active; | ||
| 353 | |||
| 354 | msg[i] = ret; | ||
| 355 | } | ||
| 356 | |||
| 357 | ret = 0; | ||
| 358 | |||
| 359 | err_set_firmware_active: | ||
| 360 | if (ret < 0) | ||
| 361 | DRM_DEV_ERROR(dp->dev, "set firmware active failed\n"); | ||
| 362 | return ret; | ||
| 363 | } | ||
| 364 | |||
| 365 | int cdn_dp_set_host_cap(struct cdn_dp_device *dp, u8 lanes, bool flip) | ||
| 366 | { | ||
| 367 | u8 msg[8]; | ||
| 368 | int ret; | ||
| 369 | |||
| 370 | msg[0] = CDN_DP_MAX_LINK_RATE; | ||
| 371 | msg[1] = lanes | SCRAMBLER_EN; | ||
| 372 | msg[2] = VOLTAGE_LEVEL_2; | ||
| 373 | msg[3] = PRE_EMPHASIS_LEVEL_3; | ||
| 374 | msg[4] = PTS1 | PTS2 | PTS3 | PTS4; | ||
| 375 | msg[5] = FAST_LT_NOT_SUPPORT; | ||
| 376 | msg[6] = flip ? LANE_MAPPING_FLIPPED : LANE_MAPPING_NORMAL; | ||
| 377 | msg[7] = ENHANCED; | ||
| 378 | |||
| 379 | ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, | ||
| 380 | DPTX_SET_HOST_CAPABILITIES, | ||
| 381 | sizeof(msg), msg); | ||
| 382 | if (ret) | ||
| 383 | goto err_set_host_cap; | ||
| 384 | |||
| 385 | ret = cdn_dp_reg_write(dp, DP_AUX_SWAP_INVERSION_CONTROL, | ||
| 386 | AUX_HOST_INVERT); | ||
| 387 | |||
| 388 | err_set_host_cap: | ||
| 389 | if (ret) | ||
| 390 | DRM_DEV_ERROR(dp->dev, "set host cap failed: %d\n", ret); | ||
| 391 | return ret; | ||
| 392 | } | ||
| 393 | |||
| 394 | int cdn_dp_event_config(struct cdn_dp_device *dp) | ||
| 395 | { | ||
| 396 | u8 msg[5]; | ||
| 397 | int ret; | ||
| 398 | |||
| 399 | memset(msg, 0, sizeof(msg)); | ||
| 400 | |||
| 401 | msg[0] = DPTX_EVENT_ENABLE_HPD | DPTX_EVENT_ENABLE_TRAINING; | ||
| 402 | |||
| 403 | ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_ENABLE_EVENT, | ||
| 404 | sizeof(msg), msg); | ||
| 405 | if (ret) | ||
| 406 | DRM_DEV_ERROR(dp->dev, "set event config failed: %d\n", ret); | ||
| 407 | |||
| 408 | return ret; | ||
| 409 | } | ||
| 410 | |||
| 411 | u32 cdn_dp_get_event(struct cdn_dp_device *dp) | ||
| 412 | { | ||
| 413 | return readl(dp->regs + SW_EVENTS0); | ||
| 414 | } | ||
| 415 | |||
| 416 | int cdn_dp_get_hpd_status(struct cdn_dp_device *dp) | ||
| 417 | { | ||
| 418 | u8 status; | ||
| 419 | int ret; | ||
| 420 | |||
| 421 | ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_HPD_STATE, | ||
| 422 | 0, NULL); | ||
| 423 | if (ret) | ||
| 424 | goto err_get_hpd; | ||
| 425 | |||
| 426 | ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX, | ||
| 427 | DPTX_HPD_STATE, sizeof(status)); | ||
| 428 | if (ret) | ||
| 429 | goto err_get_hpd; | ||
| 430 | |||
| 431 | ret = cdn_dp_mailbox_read_receive(dp, &status, sizeof(status)); | ||
| 432 | if (ret) | ||
| 433 | goto err_get_hpd; | ||
| 434 | |||
| 435 | return status; | ||
| 436 | |||
| 437 | err_get_hpd: | ||
| 438 | DRM_DEV_ERROR(dp->dev, "get hpd status failed: %d\n", ret); | ||
| 439 | return ret; | ||
| 440 | } | ||
| 441 | |||
| 442 | int cdn_dp_get_edid_block(void *data, u8 *edid, | ||
| 443 | unsigned int block, size_t length) | ||
| 444 | { | ||
| 445 | struct cdn_dp_device *dp = data; | ||
| 446 | u8 msg[2], reg[2], i; | ||
| 447 | int ret; | ||
| 448 | |||
| 449 | for (i = 0; i < 4; i++) { | ||
| 450 | msg[0] = block / 2; | ||
| 451 | msg[1] = block % 2; | ||
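| | /* | ||
| | * The firmware addresses EDID by 256-byte segment plus a block | ||
| | * offset within it: block 2, for example, maps to segment 1, | ||
| | * offset 0 (the first 128 bytes of the second segment). | ||
| | */ | ||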
| 452 | |||
| 453 | ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_GET_EDID, | ||
| 454 | sizeof(msg), msg); | ||
| 455 | if (ret) | ||
| 456 | continue; | ||
| 457 | |||
| 458 | ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX, | ||
| 459 | DPTX_GET_EDID, | ||
| 460 | sizeof(reg) + length); | ||
| 461 | if (ret) | ||
| 462 | continue; | ||
| 463 | |||
| 464 | ret = cdn_dp_mailbox_read_receive(dp, reg, sizeof(reg)); | ||
| 465 | if (ret) | ||
| 466 | continue; | ||
| 467 | |||
| 468 | ret = cdn_dp_mailbox_read_receive(dp, edid, length); | ||
| 469 | if (ret) | ||
| 470 | continue; | ||
| 471 | |||
| 472 | if (reg[0] == length && reg[1] == block / 2) | ||
| 473 | break; | ||
| 474 | } | ||
| 475 | |||
| 476 | if (ret) | ||
| 477 | DRM_DEV_ERROR(dp->dev, "get block[%d] edid failed: %d\n", block, | ||
| 478 | ret); | ||
| 479 | |||
| 480 | return ret; | ||
| 481 | } | ||
| 482 | |||
| 483 | static int cdn_dp_training_start(struct cdn_dp_device *dp) | ||
| 484 | { | ||
| 485 | unsigned long timeout; | ||
| 486 | u8 msg, event[2]; | ||
| 487 | int ret; | ||
| 488 | |||
| 489 | msg = LINK_TRAINING_RUN; | ||
| 490 | |||
| 491 | /* start training */ | ||
| 492 | ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_TRAINING_CONTROL, | ||
| 493 | sizeof(msg), &msg); | ||
| 494 | if (ret) | ||
| 495 | goto err_training_start; | ||
| 496 | |||
| 497 | timeout = jiffies + msecs_to_jiffies(LINK_TRAINING_TIMEOUT_MS); | ||
| 498 | while (time_before(jiffies, timeout)) { | ||
| 499 | msleep(LINK_TRAINING_RETRY_MS); | ||
| 500 | ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, | ||
| 501 | DPTX_READ_EVENT, 0, NULL); | ||
| 502 | if (ret) | ||
| 503 | goto err_training_start; | ||
| 504 | |||
| 505 | ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX, | ||
| 506 | DPTX_READ_EVENT, | ||
| 507 | sizeof(event)); | ||
| 508 | if (ret) | ||
| 509 | goto err_training_start; | ||
| 510 | |||
| 511 | ret = cdn_dp_mailbox_read_receive(dp, event, sizeof(event)); | ||
| 512 | if (ret) | ||
| 513 | goto err_training_start; | ||
| 514 | |||
| 515 | if (event[1] & EQ_PHASE_FINISHED) | ||
| 516 | return 0; | ||
| 517 | } | ||
| 518 | |||
| 519 | ret = -ETIMEDOUT; | ||
| 520 | |||
| 521 | err_training_start: | ||
| 522 | DRM_DEV_ERROR(dp->dev, "training failed: %d\n", ret); | ||
| 523 | return ret; | ||
| 524 | } | ||
| 525 | |||
| 526 | static int cdn_dp_get_training_status(struct cdn_dp_device *dp) | ||
| 527 | { | ||
| 528 | u8 status[10]; | ||
| 529 | int ret; | ||
| 530 | |||
| 531 | ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_READ_LINK_STAT, | ||
| 532 | 0, NULL); | ||
| 533 | if (ret) | ||
| 534 | goto err_get_training_status; | ||
| 535 | |||
| 536 | ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX, | ||
| 537 | DPTX_READ_LINK_STAT, | ||
| 538 | sizeof(status)); | ||
| 539 | if (ret) | ||
| 540 | goto err_get_training_status; | ||
| 541 | |||
| 542 | ret = cdn_dp_mailbox_read_receive(dp, status, sizeof(status)); | ||
| 543 | if (ret) | ||
| 544 | goto err_get_training_status; | ||
| 545 | |||
| 546 | dp->link.rate = status[0]; | ||
| 547 | dp->link.num_lanes = status[1]; | ||
| 548 | |||
| 549 | err_get_training_status: | ||
| 550 | if (ret) | ||
| 551 | DRM_DEV_ERROR(dp->dev, "get training status failed: %d\n", ret); | ||
| 552 | return ret; | ||
| 553 | } | ||
| 554 | |||
| 555 | int cdn_dp_train_link(struct cdn_dp_device *dp) | ||
| 556 | { | ||
| 557 | int ret; | ||
| 558 | |||
| 559 | ret = cdn_dp_training_start(dp); | ||
| 560 | if (ret) { | ||
| 561 | DRM_DEV_ERROR(dp->dev, "Failed to start training %d\n", ret); | ||
| 562 | return ret; | ||
| 563 | } | ||
| 564 | |||
| 565 | ret = cdn_dp_get_training_status(dp); | ||
| 566 | if (ret) { | ||
| 567 | DRM_DEV_ERROR(dp->dev, "Failed to get training stat %d\n", ret); | ||
| 568 | return ret; | ||
| 569 | } | ||
| 570 | |||
| 571 | DRM_DEV_DEBUG_KMS(dp->dev, "rate:0x%x, lanes:%d\n", dp->link.rate, | ||
| 572 | dp->link.num_lanes); | ||
| 573 | return ret; | ||
| 574 | } | ||
| 575 | |||
| 576 | int cdn_dp_set_video_status(struct cdn_dp_device *dp, int active) | ||
| 577 | { | ||
| 578 | u8 msg; | ||
| 579 | int ret; | ||
| 580 | |||
| 581 | msg = !!active; | ||
| 582 | |||
| 583 | ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_SET_VIDEO, | ||
| 584 | sizeof(msg), &msg); | ||
| 585 | if (ret) | ||
| 586 | DRM_DEV_ERROR(dp->dev, "set video status failed: %d\n", ret); | ||
| 587 | |||
| 588 | return ret; | ||
| 589 | } | ||
| 590 | |||
| 591 | static int cdn_dp_get_msa_misc(struct video_info *video, | ||
| 592 | struct drm_display_mode *mode) | ||
| 593 | { | ||
| 594 | u32 msa_misc; | ||
| 595 | u8 val[2] = {0}; | ||
| 596 | |||
| 597 | switch (video->color_fmt) { | ||
| 598 | case PXL_RGB: | ||
| 599 | case Y_ONLY: | ||
| 600 | val[0] = 0; | ||
| 601 | break; | ||
| 602 | /* set YUV default color space conversion to BT601 */ | ||
| 603 | case YCBCR_4_4_4: | ||
| 604 | val[0] = 6 + BT_601 * 8; | ||
| 605 | break; | ||
| 606 | case YCBCR_4_2_2: | ||
| 607 | val[0] = 5 + BT_601 * 8; | ||
| 608 | break; | ||
| 609 | case YCBCR_4_2_0: | ||
| 610 | val[0] = 5; | ||
| 611 | break; | ||
| 612 | } | ||
| 613 | |||
| 614 | switch (video->color_depth) { | ||
| 615 | case 6: | ||
| 616 | val[1] = 0; | ||
| 617 | break; | ||
| 618 | case 8: | ||
| 619 | val[1] = 1; | ||
| 620 | break; | ||
| 621 | case 10: | ||
| 622 | val[1] = 2; | ||
| 623 | break; | ||
| 624 | case 12: | ||
| 625 | val[1] = 3; | ||
| 626 | break; | ||
| 627 | case 16: | ||
| 628 | val[1] = 4; | ||
| 629 | break; | ||
| 630 | } | ||
| 631 | |||
| 632 | msa_misc = 2 * val[0] + 32 * val[1] + | ||
| 633 | ((video->color_fmt == Y_ONLY) ? (1 << 14) : 0); | ||
| 634 | |||
| 635 | return msa_misc; | ||
| 636 | } | ||
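| | /* | ||
| | * Example: RGB at 8 bpc gives val[0] = 0 and val[1] = 1, so | ||
| | * msa_misc = 32 (0x20); bits [7:5] carry the bit depth and bits | ||
| | * [4:1] the color format and colorimetry, matching the DP MSA MISC0 | ||
| | * layout. | ||
| | */ | ||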
| 637 | |||
| 638 | int cdn_dp_config_video(struct cdn_dp_device *dp) | ||
| 639 | { | ||
| 640 | struct video_info *video = &dp->video_info; | ||
| 641 | struct drm_display_mode *mode = &dp->mode; | ||
| 642 | u64 symbol; | ||
| 643 | u32 val, link_rate, rem; | ||
| 644 | u8 bit_per_pix, tu_size_reg = TU_SIZE; | ||
| 645 | int ret; | ||
| 646 | |||
| 647 | bit_per_pix = (video->color_fmt == YCBCR_4_2_2) ? | ||
| 648 | (video->color_depth * 2) : (video->color_depth * 3); | ||
| 649 | |||
| 650 | link_rate = drm_dp_bw_code_to_link_rate(dp->link.rate) / 1000; | ||
| 651 | |||
| 652 | ret = cdn_dp_reg_write(dp, BND_HSYNC2VSYNC, VIF_BYPASS_INTERLACE); | ||
| 653 | if (ret) | ||
| 654 | goto err_config_video; | ||
| 655 | |||
| 656 | ret = cdn_dp_reg_write(dp, HSYNC2VSYNC_POL_CTRL, 0); | ||
| 657 | if (ret) | ||
| 658 | goto err_config_video; | ||
| 659 | |||
| 660 | /* | ||
| 661 | * find the best tu_size and valid symbol count: | ||
| 662 | * 1. choose the Lclk freq (162MHz, 270MHz or 540MHz), set TU to 32 | ||
| 663 | * 2. calculate VS (valid symbols) = TU * Pclk * Bpp / (Lclk * Lanes * 8) | ||
| 664 | * 3. if the fraction of VS is > .85 or < .1, or VS < 2, or | ||
| 665 | * TU < VS + 4, then set TU += 2 and repeat from step 2. | ||
| 666 | */ | ||
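| | /* | ||
| | * Worked example (assuming 1920x1080@60, so Pclk = 148500 kHz, | ||
| | * 24 bpp, and 4 lanes at 5.4 Gbps, so Lclk = 540 MHz): with a | ||
| | * candidate TU of 32, VS = 32 * 148500 * 24 / (540000 * 4 * 8) | ||
| | * = 6.6. The fraction .6 lies within [.1, .85], VS > 1 and | ||
| | * TU - VS >= 4, so the loop accepts TU = 32, VS = 6. | ||
| | */ | ||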
| 667 | do { | ||
| 668 | tu_size_reg += 2; | ||
| 669 | symbol = tu_size_reg * mode->clock * bit_per_pix; | ||
| 670 | do_div(symbol, dp->link.num_lanes * link_rate * 8); | ||
| 671 | rem = do_div(symbol, 1000); | ||
| 672 | if (tu_size_reg > 64) { | ||
| 673 | ret = -EINVAL; | ||
| 674 | goto err_config_video; | ||
| 675 | } | ||
| 676 | } while ((symbol <= 1) || (tu_size_reg - symbol < 4) || | ||
| 677 | (rem > 850) || (rem < 100)); | ||
| 678 | |||
| 679 | val = symbol + (tu_size_reg << 8); | ||
| 680 | val |= TU_CNT_RST_EN; | ||
| 681 | ret = cdn_dp_reg_write(dp, DP_FRAMER_TU, val); | ||
| 682 | if (ret) | ||
| 683 | goto err_config_video; | ||
| 684 | |||
| 685 | /* set the FIFO Buffer size */ | ||
| 686 | val = div_u64(mode->clock * (symbol + 1), 1000) + link_rate; | ||
| 687 | val /= (dp->link.num_lanes * link_rate); | ||
| 688 | val = div_u64(8 * (symbol + 1), bit_per_pix) - val; | ||
| 689 | val += 2; | ||
| 690 | ret = cdn_dp_reg_write(dp, DP_VC_TABLE(15), val); | ||
| 691 | |||
| 692 | switch (video->color_depth) { | ||
| 693 | case 6: | ||
| 694 | val = BCS_6; | ||
| 695 | break; | ||
| 696 | case 8: | ||
| 697 | val = BCS_8; | ||
| 698 | break; | ||
| 699 | case 10: | ||
| 700 | val = BCS_10; | ||
| 701 | break; | ||
| 702 | case 12: | ||
| 703 | val = BCS_12; | ||
| 704 | break; | ||
| 705 | case 16: | ||
| 706 | val = BCS_16; | ||
| 707 | break; | ||
| 708 | } | ||
| 709 | |||
| 710 | val += video->color_fmt << 8; | ||
| 711 | ret = cdn_dp_reg_write(dp, DP_FRAMER_PXL_REPR, val); | ||
| 712 | if (ret) | ||
| 713 | goto err_config_video; | ||
| 714 | |||
| 715 | val = video->h_sync_polarity ? DP_FRAMER_SP_HSP : 0; | ||
| 716 | val |= video->v_sync_polarity ? DP_FRAMER_SP_VSP : 0; | ||
| 717 | ret = cdn_dp_reg_write(dp, DP_FRAMER_SP, val); | ||
| 718 | if (ret) | ||
| 719 | goto err_config_video; | ||
| 720 | |||
| 721 | val = (mode->hsync_start - mode->hdisplay) << 16; | ||
| 722 | val |= mode->htotal - mode->hsync_end; | ||
| 723 | ret = cdn_dp_reg_write(dp, DP_FRONT_BACK_PORCH, val); | ||
| 724 | if (ret) | ||
| 725 | goto err_config_video; | ||
| 726 | |||
| 727 | val = mode->hdisplay * bit_per_pix / 8; | ||
| 728 | ret = cdn_dp_reg_write(dp, DP_BYTE_COUNT, val); | ||
| 729 | if (ret) | ||
| 730 | goto err_config_video; | ||
| 731 | |||
| 732 | val = mode->htotal | ((mode->htotal - mode->hsync_start) << 16); | ||
| 733 | ret = cdn_dp_reg_write(dp, MSA_HORIZONTAL_0, val); | ||
| 734 | if (ret) | ||
| 735 | goto err_config_video; | ||
| 736 | |||
| 737 | val = mode->hsync_end - mode->hsync_start; | ||
| 738 | val |= (mode->hdisplay << 16) | (video->h_sync_polarity << 15); | ||
| 739 | ret = cdn_dp_reg_write(dp, MSA_HORIZONTAL_1, val); | ||
| 740 | if (ret) | ||
| 741 | goto err_config_video; | ||
| 742 | |||
| 743 | val = mode->vtotal; | ||
| 744 | val |= (mode->vtotal - mode->vsync_start) << 16; | ||
| 745 | ret = cdn_dp_reg_write(dp, MSA_VERTICAL_0, val); | ||
| 746 | if (ret) | ||
| 747 | goto err_config_video; | ||
| 748 | |||
| 749 | val = mode->vsync_end - mode->vsync_start; | ||
| 750 | val |= (mode->vdisplay << 16) | (video->v_sync_polarity << 15); | ||
| 751 | ret = cdn_dp_reg_write(dp, MSA_VERTICAL_1, val); | ||
| 752 | if (ret) | ||
| 753 | goto err_config_video; | ||
| 754 | |||
| 755 | val = cdn_dp_get_msa_misc(video, mode); | ||
| 756 | ret = cdn_dp_reg_write(dp, MSA_MISC, val); | ||
| 757 | if (ret) | ||
| 758 | goto err_config_video; | ||
| 759 | |||
| 760 | ret = cdn_dp_reg_write(dp, STREAM_CONFIG, 1); | ||
| 761 | if (ret) | ||
| 762 | goto err_config_video; | ||
| 763 | |||
| 764 | val = mode->hsync_end - mode->hsync_start; | ||
| 765 | val |= mode->hdisplay << 16; | ||
| 766 | ret = cdn_dp_reg_write(dp, DP_HORIZONTAL, val); | ||
| 767 | if (ret) | ||
| 768 | goto err_config_video; | ||
| 769 | |||
| 770 | val = mode->vdisplay; | ||
| 771 | val |= (mode->vtotal - mode->vsync_start) << 16; | ||
| 772 | ret = cdn_dp_reg_write(dp, DP_VERTICAL_0, val); | ||
| 773 | if (ret) | ||
| 774 | goto err_config_video; | ||
| 775 | |||
| 776 | val = mode->vtotal; | ||
| 777 | ret = cdn_dp_reg_write(dp, DP_VERTICAL_1, val); | ||
| 778 | if (ret) | ||
| 779 | goto err_config_video; | ||
| 780 | |||
| 781 | ret = cdn_dp_reg_write_bit(dp, DP_VB_ID, 2, 1, 0); | ||
| 782 | |||
| 783 | err_config_video: | ||
| 784 | if (ret) | ||
| 785 | DRM_DEV_ERROR(dp->dev, "config video failed: %d\n", ret); | ||
| 786 | return ret; | ||
| 787 | } | ||
| 788 | |||
| 789 | int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio) | ||
| 790 | { | ||
| 791 | u32 val; | ||
| 792 | int ret; | ||
| 793 | |||
| 794 | ret = cdn_dp_reg_write(dp, AUDIO_PACK_CONTROL, 0); | ||
| 795 | if (ret) { | ||
| 796 | DRM_DEV_ERROR(dp->dev, "audio stop failed: %d\n", ret); | ||
| 797 | return ret; | ||
| 798 | } | ||
| 799 | |||
| 800 | val = SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS; | ||
| 801 | val |= SPDIF_FIFO_MID_RANGE(0xe0); | ||
| 802 | val |= SPDIF_JITTER_THRSH(0xe0); | ||
| 803 | val |= SPDIF_JITTER_AVG_WIN(7); | ||
| 804 | writel(val, dp->regs + SPDIF_CTRL_ADDR); | ||
| 805 | |||
| 806 | /* clear the audio config and reset */ | ||
| 807 | writel(0, dp->regs + AUDIO_SRC_CNTL); | ||
| 808 | writel(0, dp->regs + AUDIO_SRC_CNFG); | ||
| 809 | writel(AUDIO_SW_RST, dp->regs + AUDIO_SRC_CNTL); | ||
| 810 | writel(0, dp->regs + AUDIO_SRC_CNTL); | ||
| 811 | |||
| 812 | /* reset smpl2pckt component */ | ||
| 813 | writel(0, dp->regs + SMPL2PKT_CNTL); | ||
| 814 | writel(AUDIO_SW_RST, dp->regs + SMPL2PKT_CNTL); | ||
| 815 | writel(0, dp->regs + SMPL2PKT_CNTL); | ||
| 816 | |||
| 817 | /* reset FIFO */ | ||
| 818 | writel(AUDIO_SW_RST, dp->regs + FIFO_CNTL); | ||
| 819 | writel(0, dp->regs + FIFO_CNTL); | ||
| 820 | |||
| 821 | if (audio->format == AFMT_SPDIF) | ||
| 822 | clk_disable_unprepare(dp->spdif_clk); | ||
| 823 | |||
| 824 | return 0; | ||
| 825 | } | ||
| 826 | |||
| 827 | int cdn_dp_audio_mute(struct cdn_dp_device *dp, bool enable) | ||
| 828 | { | ||
| 829 | int ret; | ||
| 830 | |||
| 831 | ret = cdn_dp_reg_write_bit(dp, DP_VB_ID, 4, 1, enable); | ||
| 832 | if (ret) | ||
| 833 | DRM_DEV_ERROR(dp->dev, "audio mute failed: %d\n", ret); | ||
| 834 | |||
| 835 | return ret; | ||
| 836 | } | ||
| 837 | |||
| 838 | static void cdn_dp_audio_config_i2s(struct cdn_dp_device *dp, | ||
| 839 | struct audio_info *audio) | ||
| 840 | { | ||
| 841 | int sub_pckt_num = 1, i2s_port_en_val = 0xf, i; | ||
| 842 | u32 val; | ||
| 843 | |||
| 844 | if (audio->channels == 2) { | ||
| 845 | if (dp->link.num_lanes == 1) | ||
| 846 | sub_pckt_num = 2; | ||
| 847 | else | ||
| 848 | sub_pckt_num = 4; | ||
| 849 | |||
| 850 | i2s_port_en_val = 1; | ||
| 851 | } else if (audio->channels == 4) { | ||
| 852 | i2s_port_en_val = 3; | ||
| 853 | } | ||
| 854 | |||
| 855 | writel(0x0, dp->regs + SPDIF_CTRL_ADDR); | ||
| 856 | |||
| 857 | writel(SYNC_WR_TO_CH_ZERO, dp->regs + FIFO_CNTL); | ||
| 858 | |||
| 859 | val = MAX_NUM_CH(audio->channels); | ||
| 860 | val |= NUM_OF_I2S_PORTS(audio->channels); | ||
| 861 | val |= AUDIO_TYPE_LPCM; | ||
| 862 | val |= CFG_SUB_PCKT_NUM(sub_pckt_num); | ||
| 863 | writel(val, dp->regs + SMPL2PKT_CNFG); | ||
| 864 | |||
| 865 | if (audio->sample_width == 16) | ||
| 866 | val = 0; | ||
| 867 | else if (audio->sample_width == 24) | ||
| 868 | val = 1 << 9; | ||
| 869 | else | ||
| 870 | val = 2 << 9; | ||
| 871 | |||
| 872 | val |= AUDIO_CH_NUM(audio->channels); | ||
| 873 | val |= I2S_DEC_PORT_EN(i2s_port_en_val); | ||
| 874 | val |= TRANS_SMPL_WIDTH_32; | ||
| 875 | writel(val, dp->regs + AUDIO_SRC_CNFG); | ||
| 876 | |||
| 877 | for (i = 0; i < (audio->channels + 1) / 2; i++) { | ||
| 878 | if (audio->sample_width == 16) | ||
| 879 | val = (0x02 << 8) | (0x02 << 20); | ||
| 880 | else if (audio->sample_width == 24) | ||
| 881 | val = (0x0b << 8) | (0x0b << 20); | ||
| 882 | |||
| 883 | val |= ((2 * i) << 4) | ((2 * i + 1) << 16); | ||
| 884 | writel(val, dp->regs + STTS_BIT_CH(i)); | ||
| 885 | } | ||
| 886 | |||
| 887 | switch (audio->sample_rate) { | ||
| 888 | case 32000: | ||
| 889 | val = SAMPLING_FREQ(3) | | ||
| 890 | ORIGINAL_SAMP_FREQ(0xc); | ||
| 891 | break; | ||
| 892 | case 44100: | ||
| 893 | val = SAMPLING_FREQ(0) | | ||
| 894 | ORIGINAL_SAMP_FREQ(0xf); | ||
| 895 | break; | ||
| 896 | case 48000: | ||
| 897 | val = SAMPLING_FREQ(2) | | ||
| 898 | ORIGINAL_SAMP_FREQ(0xd); | ||
| 899 | break; | ||
| 900 | case 88200: | ||
| 901 | val = SAMPLING_FREQ(8) | | ||
| 902 | ORIGINAL_SAMP_FREQ(0x7); | ||
| 903 | break; | ||
| 904 | case 96000: | ||
| 905 | val = SAMPLING_FREQ(0xa) | | ||
| 906 | ORIGINAL_SAMP_FREQ(5); | ||
| 907 | break; | ||
| 908 | case 176400: | ||
| 909 | val = SAMPLING_FREQ(0xc) | | ||
| 910 | ORIGINAL_SAMP_FREQ(3); | ||
| 911 | break; | ||
| 912 | case 192000: | ||
| 913 | val = SAMPLING_FREQ(0xe) | | ||
| 914 | ORIGINAL_SAMP_FREQ(1); | ||
| 915 | break; | ||
| 916 | } | ||
| 917 | val |= 4; | ||
| 918 | writel(val, dp->regs + COM_CH_STTS_BITS); | ||
| 919 | |||
| 920 | writel(SMPL2PKT_EN, dp->regs + SMPL2PKT_CNTL); | ||
| 921 | writel(I2S_DEC_START, dp->regs + AUDIO_SRC_CNTL); | ||
| 922 | } | ||
| 923 | |||
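The channel-status word built in the sample-rate switch above can be verified standalone. A sketch for the 48 kHz case, with the two macros copied from cdn-dp-reg.h and the driver's unconditional `val |= 4` applied; note that in every case of the switch the ORIGINAL_SAMP_FREQ code is the 4-bit complement of the SAMPLING_FREQ code, consistent with IEC 60958's inverted original-sampling-frequency encoding:

#include <stdint.h>
#include <stdio.h>

#define SAMPLING_FREQ(x)	(((x) & 0xf) << 16)
#define ORIGINAL_SAMP_FREQ(x)	(((x) & 0xf) << 24)

int main(void)
{
	/* 48 kHz branch of the switch, plus the trailing "val |= 4" */
	uint32_t val = SAMPLING_FREQ(2) | ORIGINAL_SAMP_FREQ(0xd) | 4;

	printf("COM_CH_STTS_BITS = 0x%08x\n", val);	/* 0x0d020004 */
	return 0;
}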
| 924 | static void cdn_dp_audio_config_spdif(struct cdn_dp_device *dp) | ||
| 925 | { | ||
| 926 | u32 val; | ||
| 927 | |||
| 928 | val = SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS; | ||
| 929 | val |= SPDIF_FIFO_MID_RANGE(0xe0); | ||
| 930 | val |= SPDIF_JITTER_THRSH(0xe0); | ||
| 931 | val |= SPDIF_JITTER_AVG_WIN(7); | ||
| 932 | writel(val, dp->regs + SPDIF_CTRL_ADDR); | ||
| 933 | |||
| 934 | writel(SYNC_WR_TO_CH_ZERO, dp->regs + FIFO_CNTL); | ||
| 935 | |||
| 936 | val = MAX_NUM_CH(2) | AUDIO_TYPE_LPCM | CFG_SUB_PCKT_NUM(4); | ||
| 937 | writel(val, dp->regs + SMPL2PKT_CNFG); | ||
| 938 | writel(SMPL2PKT_EN, dp->regs + SMPL2PKT_CNTL); | ||
| 939 | |||
| 940 | val = SPDIF_ENABLE | SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS; | ||
| 941 | val |= SPDIF_FIFO_MID_RANGE(0xe0); | ||
| 942 | val |= SPDIF_JITTER_THRSH(0xe0); | ||
| 943 | val |= SPDIF_JITTER_AVG_WIN(7); | ||
| 944 | writel(val, dp->regs + SPDIF_CTRL_ADDR); | ||
| 945 | |||
| 946 | clk_prepare_enable(dp->spdif_clk); | ||
| 947 | clk_set_rate(dp->spdif_clk, CDN_DP_SPDIF_CLK); | ||
| 948 | } | ||
| 949 | |||
| 950 | int cdn_dp_audio_config(struct cdn_dp_device *dp, struct audio_info *audio) | ||
| 951 | { | ||
| 952 | int ret; | ||
| 953 | |||
| 954 | /* reset the spdif clk before config */ | ||
| 955 | if (audio->format == AFMT_SPDIF) { | ||
| 956 | reset_control_assert(dp->spdif_rst); | ||
| 957 | reset_control_deassert(dp->spdif_rst); | ||
| 958 | } | ||
| 959 | |||
| 960 | ret = cdn_dp_reg_write(dp, CM_LANE_CTRL, LANE_REF_CYC); | ||
| 961 | if (ret) | ||
| 962 | goto err_audio_config; | ||
| 963 | |||
| 964 | ret = cdn_dp_reg_write(dp, CM_CTRL, 0); | ||
| 965 | if (ret) | ||
| 966 | goto err_audio_config; | ||
| 967 | |||
| 968 | if (audio->format == AFMT_I2S) | ||
| 969 | cdn_dp_audio_config_i2s(dp, audio); | ||
| 970 | else if (audio->format == AFMT_SPDIF) | ||
| 971 | cdn_dp_audio_config_spdif(dp); | ||
| 972 | |||
| 973 | ret = cdn_dp_reg_write(dp, AUDIO_PACK_CONTROL, AUDIO_PACK_EN); | ||
| 974 | |||
| 975 | err_audio_config: | ||
| 976 | if (ret) | ||
| 977 | DRM_DEV_ERROR(dp->dev, "audio config failed: %d\n", ret); | ||
| 978 | return ret; | ||
| 979 | } | ||
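Together these helpers form the audio half of the mailbox API that the driver's audio glue drives. A hypothetical caller sketch, not the driver's actual audio code; the audio_info field names are inferred from their use in this file:

static int example_audio_start(struct cdn_dp_device *dp)
{
	struct audio_info audio = {
		.format = AFMT_I2S,
		.channels = 2,
		.sample_rate = 48000,
		.sample_width = 16,
	};
	int ret;

	ret = cdn_dp_audio_config(dp, &audio);
	if (ret)
		return ret;

	/* unmute once the stream is configured */
	return cdn_dp_audio_mute(dp, false);
}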
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.h b/drivers/gpu/drm/rockchip/cdn-dp-reg.h new file mode 100644 index 000000000000..b5f215324694 --- /dev/null +++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.h | |||
| @@ -0,0 +1,483 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd | ||
| 3 | * Author: Chris Zhong <zyw@rock-chips.com> | ||
| 4 | * | ||
| 5 | * This software is licensed under the terms of the GNU General Public | ||
| 6 | * License version 2, as published by the Free Software Foundation, and | ||
| 7 | * may be copied, distributed, and modified under those terms. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | */ | ||
| 14 | |||
| 15 | #ifndef _CDN_DP_REG_H | ||
| 16 | #define _CDN_DP_REG_H | ||
| 17 | |||
| 18 | #include <linux/bitops.h> | ||
| 19 | |||
| 20 | #define ADDR_IMEM 0x10000 | ||
| 21 | #define ADDR_DMEM 0x20000 | ||
| 22 | |||
| 23 | /* APB CFG addr */ | ||
| 24 | #define APB_CTRL 0 | ||
| 25 | #define XT_INT_CTRL 0x04 | ||
| 26 | #define MAILBOX_FULL_ADDR 0x08 | ||
| 27 | #define MAILBOX_EMPTY_ADDR 0x0c | ||
| 28 | #define MAILBOX0_WR_DATA 0x10 | ||
| 29 | #define MAILBOX0_RD_DATA 0x14 | ||
| 30 | #define KEEP_ALIVE 0x18 | ||
| 31 | #define VER_L 0x1c | ||
| 32 | #define VER_H 0x20 | ||
| 33 | #define VER_LIB_L_ADDR 0x24 | ||
| 34 | #define VER_LIB_H_ADDR 0x28 | ||
| 35 | #define SW_DEBUG_L 0x2c | ||
| 36 | #define SW_DEBUG_H 0x30 | ||
| 37 | #define MAILBOX_INT_MASK 0x34 | ||
| 38 | #define MAILBOX_INT_STATUS 0x38 | ||
| 39 | #define SW_CLK_L 0x3c | ||
| 40 | #define SW_CLK_H 0x40 | ||
| 41 | #define SW_EVENTS0 0x44 | ||
| 42 | #define SW_EVENTS1 0x48 | ||
| 43 | #define SW_EVENTS2 0x4c | ||
| 44 | #define SW_EVENTS3 0x50 | ||
| 45 | #define XT_OCD_CTRL 0x60 | ||
| 46 | #define APB_INT_MASK 0x6c | ||
| 47 | #define APB_STATUS_MASK 0x70 | ||
| 48 | |||
| 49 | /* audio decoder addr */ | ||
| 50 | #define AUDIO_SRC_CNTL 0x30000 | ||
| 51 | #define AUDIO_SRC_CNFG 0x30004 | ||
| 52 | #define COM_CH_STTS_BITS 0x30008 | ||
| 53 | #define STTS_BIT_CH(x) (0x3000c + ((x) << 2)) | ||
| 54 | #define SPDIF_CTRL_ADDR 0x3004c | ||
| 55 | #define SPDIF_CH1_CS_3100_ADDR 0x30050 | ||
| 56 | #define SPDIF_CH1_CS_6332_ADDR 0x30054 | ||
| 57 | #define SPDIF_CH1_CS_9564_ADDR 0x30058 | ||
| 58 | #define SPDIF_CH1_CS_12796_ADDR 0x3005c | ||
| 59 | #define SPDIF_CH1_CS_159128_ADDR 0x30060 | ||
| 60 | #define SPDIF_CH1_CS_191160_ADDR 0x30064 | ||
| 61 | #define SPDIF_CH2_CS_3100_ADDR 0x30068 | ||
| 62 | #define SPDIF_CH2_CS_6332_ADDR 0x3006c | ||
| 63 | #define SPDIF_CH2_CS_9564_ADDR 0x30070 | ||
| 64 | #define SPDIF_CH2_CS_12796_ADDR 0x30074 | ||
| 65 | #define SPDIF_CH2_CS_159128_ADDR 0x30078 | ||
| 66 | #define SPDIF_CH2_CS_191160_ADDR 0x3007c | ||
| 67 | #define SMPL2PKT_CNTL 0x30080 | ||
| 68 | #define SMPL2PKT_CNFG 0x30084 | ||
| 69 | #define FIFO_CNTL 0x30088 | ||
| 70 | #define FIFO_STTS 0x3008c | ||
| 71 | |||
| 72 | /* source pif addr */ | ||
| 73 | #define SOURCE_PIF_WR_ADDR 0x30800 | ||
| 74 | #define SOURCE_PIF_WR_REQ 0x30804 | ||
| 75 | #define SOURCE_PIF_RD_ADDR 0x30808 | ||
| 76 | #define SOURCE_PIF_RD_REQ 0x3080c | ||
| 77 | #define SOURCE_PIF_DATA_WR 0x30810 | ||
| 78 | #define SOURCE_PIF_DATA_RD 0x30814 | ||
| 79 | #define SOURCE_PIF_FIFO1_FLUSH 0x30818 | ||
| 80 | #define SOURCE_PIF_FIFO2_FLUSH 0x3081c | ||
| 81 | #define SOURCE_PIF_STATUS 0x30820 | ||
| 82 | #define SOURCE_PIF_INTERRUPT_SOURCE 0x30824 | ||
| 83 | #define SOURCE_PIF_INTERRUPT_MASK 0x30828 | ||
| 84 | #define SOURCE_PIF_PKT_ALLOC_REG 0x3082c | ||
| 85 | #define SOURCE_PIF_PKT_ALLOC_WR_EN 0x30830 | ||
| 86 | #define SOURCE_PIF_SW_RESET 0x30834 | ||
| 87 | |||
| 88 | /* the registers below must be accessed via the mailbox */ | ||
| 89 | /* source car addr */ | ||
| 90 | #define SOURCE_HDTX_CAR 0x0900 | ||
| 91 | #define SOURCE_DPTX_CAR 0x0904 | ||
| 92 | #define SOURCE_PHY_CAR 0x0908 | ||
| 93 | #define SOURCE_CEC_CAR 0x090c | ||
| 94 | #define SOURCE_CBUS_CAR 0x0910 | ||
| 95 | #define SOURCE_PKT_CAR 0x0918 | ||
| 96 | #define SOURCE_AIF_CAR 0x091c | ||
| 97 | #define SOURCE_CIPHER_CAR 0x0920 | ||
| 98 | #define SOURCE_CRYPTO_CAR 0x0924 | ||
| 99 | |||
| 100 | /* clock meters addr */ | ||
| 101 | #define CM_CTRL 0x0a00 | ||
| 102 | #define CM_I2S_CTRL 0x0a04 | ||
| 103 | #define CM_SPDIF_CTRL 0x0a08 | ||
| 104 | #define CM_VID_CTRL 0x0a0c | ||
| 105 | #define CM_LANE_CTRL 0x0a10 | ||
| 106 | #define I2S_NM_STABLE 0x0a14 | ||
| 107 | #define I2S_NCTS_STABLE 0x0a18 | ||
| 108 | #define SPDIF_NM_STABLE 0x0a1c | ||
| 109 | #define SPDIF_NCTS_STABLE 0x0a20 | ||
| 110 | #define NMVID_MEAS_STABLE 0x0a24 | ||
| 111 | #define I2S_MEAS 0x0a40 | ||
| 112 | #define SPDIF_MEAS 0x0a80 | ||
| 113 | #define NMVID_MEAS 0x0ac0 | ||
| 114 | |||
| 115 | /* source vif addr */ | ||
| 116 | #define BND_HSYNC2VSYNC 0x0b00 | ||
| 117 | #define HSYNC2VSYNC_F1_L1 0x0b04 | ||
| 118 | #define HSYNC2VSYNC_F2_L1 0x0b08 | ||
| 119 | #define HSYNC2VSYNC_STATUS 0x0b0c | ||
| 120 | #define HSYNC2VSYNC_POL_CTRL 0x0b10 | ||
| 121 | |||
| 122 | /* dptx phy addr */ | ||
| 123 | #define DP_TX_PHY_CONFIG_REG 0x2000 | ||
| 124 | #define DP_TX_PHY_STATUS_REG 0x2004 | ||
| 125 | #define DP_TX_PHY_SW_RESET 0x2008 | ||
| 126 | #define DP_TX_PHY_SCRAMBLER_SEED 0x200c | ||
| 127 | #define DP_TX_PHY_TRAINING_01_04 0x2010 | ||
| 128 | #define DP_TX_PHY_TRAINING_05_08 0x2014 | ||
| 129 | #define DP_TX_PHY_TRAINING_09_10 0x2018 | ||
| 130 | #define TEST_COR 0x23fc | ||
| 131 | |||
| 132 | /* dptx hpd addr */ | ||
| 133 | #define HPD_IRQ_DET_MIN_TIMER 0x2100 | ||
| 134 | #define HPD_IRQ_DET_MAX_TIMER 0x2104 | ||
| 135 | #define HPD_UNPLGED_DET_MIN_TIMER 0x2108 | ||
| 136 | #define HPD_STABLE_TIMER 0x210c | ||
| 137 | #define HPD_FILTER_TIMER 0x2110 | ||
| 138 | #define HPD_EVENT_MASK 0x211c | ||
| 139 | #define HPD_EVENT_DET 0x2120 | ||
| 140 | |||
| 141 | /* dptx framer addr */ | ||
| 142 | #define DP_FRAMER_GLOBAL_CONFIG 0x2200 | ||
| 143 | #define DP_SW_RESET 0x2204 | ||
| 144 | #define DP_FRAMER_TU 0x2208 | ||
| 145 | #define DP_FRAMER_PXL_REPR 0x220c | ||
| 146 | #define DP_FRAMER_SP 0x2210 | ||
| 147 | #define AUDIO_PACK_CONTROL 0x2214 | ||
| 148 | #define DP_VC_TABLE(x) (0x2218 + ((x) << 2)) | ||
| 149 | #define DP_VB_ID 0x2258 | ||
| 150 | #define DP_MTPH_LVP_CONTROL 0x225c | ||
| 151 | #define DP_MTPH_SYMBOL_VALUES 0x2260 | ||
| 152 | #define DP_MTPH_ECF_CONTROL 0x2264 | ||
| 153 | #define DP_MTPH_ACT_CONTROL 0x2268 | ||
| 154 | #define DP_MTPH_STATUS 0x226c | ||
| 155 | #define DP_INTERRUPT_SOURCE 0x2270 | ||
| 156 | #define DP_INTERRUPT_MASK 0x2274 | ||
| 157 | #define DP_FRONT_BACK_PORCH 0x2278 | ||
| 158 | #define DP_BYTE_COUNT 0x227c | ||
| 159 | |||
| 160 | /* dptx stream addr */ | ||
| 161 | #define MSA_HORIZONTAL_0 0x2280 | ||
| 162 | #define MSA_HORIZONTAL_1 0x2284 | ||
| 163 | #define MSA_VERTICAL_0 0x2288 | ||
| 164 | #define MSA_VERTICAL_1 0x228c | ||
| 165 | #define MSA_MISC 0x2290 | ||
| 166 | #define STREAM_CONFIG 0x2294 | ||
| 167 | #define AUDIO_PACK_STATUS 0x2298 | ||
| 168 | #define VIF_STATUS 0x229c | ||
| 169 | #define PCK_STUFF_STATUS_0 0x22a0 | ||
| 170 | #define PCK_STUFF_STATUS_1 0x22a4 | ||
| 171 | #define INFO_PACK_STATUS 0x22a8 | ||
| 172 | #define RATE_GOVERNOR_STATUS 0x22ac | ||
| 173 | #define DP_HORIZONTAL 0x22b0 | ||
| 174 | #define DP_VERTICAL_0 0x22b4 | ||
| 175 | #define DP_VERTICAL_1 0x22b8 | ||
| 176 | #define DP_BLOCK_SDP 0x22bc | ||
| 177 | |||
| 178 | /* dptx glbl addr */ | ||
| 179 | #define DPTX_LANE_EN 0x2300 | ||
| 180 | #define DPTX_ENHNCD 0x2304 | ||
| 181 | #define DPTX_INT_MASK 0x2308 | ||
| 182 | #define DPTX_INT_STATUS 0x230c | ||
| 183 | |||
| 184 | /* dp aux addr */ | ||
| 185 | #define DP_AUX_HOST_CONTROL 0x2800 | ||
| 186 | #define DP_AUX_INTERRUPT_SOURCE 0x2804 | ||
| 187 | #define DP_AUX_INTERRUPT_MASK 0x2808 | ||
| 188 | #define DP_AUX_SWAP_INVERSION_CONTROL 0x280c | ||
| 189 | #define DP_AUX_SEND_NACK_TRANSACTION 0x2810 | ||
| 190 | #define DP_AUX_CLEAR_RX 0x2814 | ||
| 191 | #define DP_AUX_CLEAR_TX 0x2818 | ||
| 192 | #define DP_AUX_TIMER_STOP 0x281c | ||
| 193 | #define DP_AUX_TIMER_CLEAR 0x2820 | ||
| 194 | #define DP_AUX_RESET_SW 0x2824 | ||
| 195 | #define DP_AUX_DIVIDE_2M 0x2828 | ||
| 196 | #define DP_AUX_TX_PREACHARGE_LENGTH 0x282c | ||
| 197 | #define DP_AUX_FREQUENCY_1M_MAX 0x2830 | ||
| 198 | #define DP_AUX_FREQUENCY_1M_MIN 0x2834 | ||
| 199 | #define DP_AUX_RX_PRE_MIN 0x2838 | ||
| 200 | #define DP_AUX_RX_PRE_MAX 0x283c | ||
| 201 | #define DP_AUX_TIMER_PRESET 0x2840 | ||
| 202 | #define DP_AUX_NACK_FORMAT 0x2844 | ||
| 203 | #define DP_AUX_TX_DATA 0x2848 | ||
| 204 | #define DP_AUX_RX_DATA 0x284c | ||
| 205 | #define DP_AUX_TX_STATUS 0x2850 | ||
| 206 | #define DP_AUX_RX_STATUS 0x2854 | ||
| 207 | #define DP_AUX_RX_CYCLE_COUNTER 0x2858 | ||
| 208 | #define DP_AUX_MAIN_STATES 0x285c | ||
| 209 | #define DP_AUX_MAIN_TIMER 0x2860 | ||
| 210 | #define DP_AUX_AFE_OUT 0x2864 | ||
| 211 | |||
| 212 | /* crypto addr */ | ||
| 213 | #define CRYPTO_HDCP_REVISION 0x5800 | ||
| 214 | #define HDCP_CRYPTO_CONFIG 0x5804 | ||
| 215 | #define CRYPTO_INTERRUPT_SOURCE 0x5808 | ||
| 216 | #define CRYPTO_INTERRUPT_MASK 0x580c | ||
| 217 | #define CRYPTO22_CONFIG 0x5818 | ||
| 218 | #define CRYPTO22_STATUS 0x581c | ||
| 219 | #define SHA_256_DATA_IN 0x583c | ||
| 220 | #define SHA_256_DATA_OUT_(x) (0x5850 + ((x) << 2)) | ||
| 221 | #define AES_32_KEY_(x) (0x5870 + ((x) << 2)) | ||
| 222 | #define AES_32_DATA_IN 0x5880 | ||
| 223 | #define AES_32_DATA_OUT_(x) (0x5884 + ((x) << 2)) | ||
| 224 | #define CRYPTO14_CONFIG 0x58a0 | ||
| 225 | #define CRYPTO14_STATUS 0x58a4 | ||
| 226 | #define CRYPTO14_PRNM_OUT 0x58a8 | ||
| 227 | #define CRYPTO14_KM_0 0x58ac | ||
| 228 | #define CRYPTO14_KM_1 0x58b0 | ||
| 229 | #define CRYPTO14_AN_0 0x58b4 | ||
| 230 | #define CRYPTO14_AN_1 0x58b8 | ||
| 231 | #define CRYPTO14_YOUR_KSV_0 0x58bc | ||
| 232 | #define CRYPTO14_YOUR_KSV_1 0x58c0 | ||
| 233 | #define CRYPTO14_MI_0 0x58c4 | ||
| 234 | #define CRYPTO14_MI_1 0x58c8 | ||
| 235 | #define CRYPTO14_TI_0 0x58cc | ||
| 236 | #define CRYPTO14_KI_0 0x58d0 | ||
| 237 | #define CRYPTO14_KI_1 0x58d4 | ||
| 238 | #define CRYPTO14_BLOCKS_NUM 0x58d8 | ||
| 239 | #define CRYPTO14_KEY_MEM_DATA_0 0x58dc | ||
| 240 | #define CRYPTO14_KEY_MEM_DATA_1 0x58e0 | ||
| 241 | #define CRYPTO14_SHA1_MSG_DATA 0x58e4 | ||
| 242 | #define CRYPTO14_SHA1_V_VALUE_(x) (0x58e8 + ((x) << 2)) | ||
| 243 | #define TRNG_CTRL 0x58fc | ||
| 244 | #define TRNG_DATA_RDY 0x5900 | ||
| 245 | #define TRNG_DATA 0x5904 | ||
| 246 | |||
| 247 | /* cipher addr */ | ||
| 248 | #define HDCP_REVISION 0x60000 | ||
| 249 | #define INTERRUPT_SOURCE 0x60004 | ||
| 250 | #define INTERRUPT_MASK 0x60008 | ||
| 251 | #define HDCP_CIPHER_CONFIG 0x6000c | ||
| 252 | #define AES_128_KEY_0 0x60010 | ||
| 253 | #define AES_128_KEY_1 0x60014 | ||
| 254 | #define AES_128_KEY_2 0x60018 | ||
| 255 | #define AES_128_KEY_3 0x6001c | ||
| 256 | #define AES_128_RANDOM_0 0x60020 | ||
| 257 | #define AES_128_RANDOM_1 0x60024 | ||
| 258 | #define CIPHER14_KM_0 0x60028 | ||
| 259 | #define CIPHER14_KM_1 0x6002c | ||
| 260 | #define CIPHER14_STATUS 0x60030 | ||
| 261 | #define CIPHER14_RI_PJ_STATUS 0x60034 | ||
| 262 | #define CIPHER_MODE 0x60038 | ||
| 263 | #define CIPHER14_AN_0 0x6003c | ||
| 264 | #define CIPHER14_AN_1 0x60040 | ||
| 265 | #define CIPHER22_AUTH 0x60044 | ||
| 266 | #define CIPHER14_R0_DP_STATUS 0x60048 | ||
| 267 | #define CIPHER14_BOOTSTRAP 0x6004c | ||
| 268 | |||
| 269 | #define DPTX_FRMR_DATA_CLK_RSTN_EN BIT(11) | ||
| 270 | #define DPTX_FRMR_DATA_CLK_EN BIT(10) | ||
| 271 | #define DPTX_PHY_DATA_RSTN_EN BIT(9) | ||
| 272 | #define DPTX_PHY_DATA_CLK_EN BIT(8) | ||
| 273 | #define DPTX_PHY_CHAR_RSTN_EN BIT(7) | ||
| 274 | #define DPTX_PHY_CHAR_CLK_EN BIT(6) | ||
| 275 | #define SOURCE_AUX_SYS_CLK_RSTN_EN BIT(5) | ||
| 276 | #define SOURCE_AUX_SYS_CLK_EN BIT(4) | ||
| 277 | #define DPTX_SYS_CLK_RSTN_EN BIT(3) | ||
| 278 | #define DPTX_SYS_CLK_EN BIT(2) | ||
| 279 | #define CFG_DPTX_VIF_CLK_RSTN_EN BIT(1) | ||
| 280 | #define CFG_DPTX_VIF_CLK_EN BIT(0) | ||
| 281 | |||
| 282 | #define SOURCE_PHY_RSTN_EN BIT(1) | ||
| 283 | #define SOURCE_PHY_CLK_EN BIT(0) | ||
| 284 | |||
| 285 | #define SOURCE_PKT_SYS_RSTN_EN BIT(3) | ||
| 286 | #define SOURCE_PKT_SYS_CLK_EN BIT(2) | ||
| 287 | #define SOURCE_PKT_DATA_RSTN_EN BIT(1) | ||
| 288 | #define SOURCE_PKT_DATA_CLK_EN BIT(0) | ||
| 289 | |||
| 290 | #define SPDIF_CDR_CLK_RSTN_EN BIT(5) | ||
| 291 | #define SPDIF_CDR_CLK_EN BIT(4) | ||
| 292 | #define SOURCE_AIF_SYS_RSTN_EN BIT(3) | ||
| 293 | #define SOURCE_AIF_SYS_CLK_EN BIT(2) | ||
| 294 | #define SOURCE_AIF_CLK_RSTN_EN BIT(1) | ||
| 295 | #define SOURCE_AIF_CLK_EN BIT(0) | ||
| 296 | |||
| 297 | #define SOURCE_CIPHER_SYSTEM_CLK_RSTN_EN BIT(3) | ||
| 298 | #define SOURCE_CIPHER_SYS_CLK_EN BIT(2) | ||
| 299 | #define SOURCE_CIPHER_CHAR_CLK_RSTN_EN BIT(1) | ||
| 300 | #define SOURCE_CIPHER_CHAR_CLK_EN BIT(0) | ||
| 301 | |||
| 302 | #define SOURCE_CRYPTO_SYS_CLK_RSTN_EN BIT(1) | ||
| 303 | #define SOURCE_CRYPTO_SYS_CLK_EN BIT(0) | ||
| 304 | |||
| 305 | #define APB_IRAM_PATH BIT(2) | ||
| 306 | #define APB_DRAM_PATH BIT(1) | ||
| 307 | #define APB_XT_RESET BIT(0) | ||
| 308 | |||
| 309 | #define MAILBOX_INT_MASK_BIT BIT(1) | ||
| 310 | #define PIF_INT_MASK_BIT BIT(0) | ||
| 311 | #define ALL_INT_MASK 3 | ||
| 312 | |||
| 313 | /* mailbox */ | ||
| 314 | #define MB_OPCODE_ID 0 | ||
| 315 | #define MB_MODULE_ID 1 | ||
| 316 | #define MB_SIZE_MSB_ID 2 | ||
| 317 | #define MB_SIZE_LSB_ID 3 | ||
| 318 | #define MB_DATA_ID 4 | ||
| 319 | |||
| 320 | #define MB_MODULE_ID_DP_TX 0x01 | ||
| 321 | #define MB_MODULE_ID_HDCP_TX 0x07 | ||
| 322 | #define MB_MODULE_ID_HDCP_RX 0x08 | ||
| 323 | #define MB_MODULE_ID_HDCP_GENERAL 0x09 | ||
| 324 | #define MB_MODULE_ID_GENERAL 0x0a | ||
| 325 | |||
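The MB_*_ID defines above are byte offsets into a mailbox message: one opcode byte, one module-ID byte, a 16-bit payload size stored MSB first, then the payload. A hypothetical helper sketch; the real message assembly lives in cdn-dp-reg.c:

static void example_mbox_header(u8 *buf, u8 opcode, u8 module, u16 size)
{
	buf[MB_OPCODE_ID]   = opcode;		/* e.g. DPTX_SET_VIDEO */
	buf[MB_MODULE_ID]   = module;		/* e.g. MB_MODULE_ID_DP_TX */
	buf[MB_SIZE_MSB_ID] = size >> 8;	/* payload length, MSB first */
	buf[MB_SIZE_LSB_ID] = size & 0xff;
	/* payload bytes follow at buf[MB_DATA_ID] */
}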
| 326 | /* general opcode */ | ||
| 327 | #define GENERAL_MAIN_CONTROL 0x01 | ||
| 328 | #define GENERAL_TEST_ECHO 0x02 | ||
| 329 | #define GENERAL_BUS_SETTINGS 0x03 | ||
| 330 | #define GENERAL_TEST_ACCESS 0x04 | ||
| 331 | |||
| 332 | #define DPTX_SET_POWER_MNG 0x00 | ||
| 333 | #define DPTX_SET_HOST_CAPABILITIES 0x01 | ||
| 334 | #define DPTX_GET_EDID 0x02 | ||
| 335 | #define DPTX_READ_DPCD 0x03 | ||
| 336 | #define DPTX_WRITE_DPCD 0x04 | ||
| 337 | #define DPTX_ENABLE_EVENT 0x05 | ||
| 338 | #define DPTX_WRITE_REGISTER 0x06 | ||
| 339 | #define DPTX_READ_REGISTER 0x07 | ||
| 340 | #define DPTX_WRITE_FIELD 0x08 | ||
| 341 | #define DPTX_TRAINING_CONTROL 0x09 | ||
| 342 | #define DPTX_READ_EVENT 0x0a | ||
| 343 | #define DPTX_READ_LINK_STAT 0x0b | ||
| 344 | #define DPTX_SET_VIDEO 0x0c | ||
| 345 | #define DPTX_SET_AUDIO 0x0d | ||
| 346 | #define DPTX_GET_LAST_AUX_STAUS 0x0e | ||
| 347 | #define DPTX_SET_LINK_BREAK_POINT 0x0f | ||
| 348 | #define DPTX_FORCE_LANES 0x10 | ||
| 349 | #define DPTX_HPD_STATE 0x11 | ||
| 350 | |||
| 351 | #define FW_STANDBY 0 | ||
| 352 | #define FW_ACTIVE 1 | ||
| 353 | |||
| 354 | #define DPTX_EVENT_ENABLE_HPD BIT(0) | ||
| 355 | #define DPTX_EVENT_ENABLE_TRAINING BIT(1) | ||
| 356 | |||
| 357 | #define LINK_TRAINING_NOT_ACTIVE 0 | ||
| 358 | #define LINK_TRAINING_RUN 1 | ||
| 359 | #define LINK_TRAINING_RESTART 2 | ||
| 360 | |||
| 361 | #define CONTROL_VIDEO_IDLE 0 | ||
| 362 | #define CONTROL_VIDEO_VALID 1 | ||
| 363 | |||
| 364 | #define TU_CNT_RST_EN BIT(15) | ||
| 365 | #define VIF_BYPASS_INTERLACE BIT(13) | ||
| 366 | #define INTERLACE_FMT_DET BIT(12) | ||
| 367 | #define INTERLACE_DTCT_WIN 0x20 | ||
| 368 | |||
| 369 | #define DP_FRAMER_SP_INTERLACE_EN BIT(2) | ||
| 370 | #define DP_FRAMER_SP_HSP BIT(1) | ||
| 371 | #define DP_FRAMER_SP_VSP BIT(0) | ||
| 372 | |||
| 373 | /* capability */ | ||
| 374 | #define AUX_HOST_INVERT 3 | ||
| 375 | #define FAST_LT_SUPPORT 1 | ||
| 376 | #define FAST_LT_NOT_SUPPORT 0 | ||
| 377 | #define LANE_MAPPING_NORMAL 0x1b | ||
| 378 | #define LANE_MAPPING_FLIPPED 0xe4 | ||
| 379 | #define ENHANCED 1 | ||
| 380 | #define SCRAMBLER_EN BIT(4) | ||
| 381 | |||
| 382 | #define FULL_LT_STARTED BIT(0) | ||
| 383 | #define FASE_LT_STARTED BIT(1) | ||
| 384 | #define CLK_RECOVERY_FINISHED BIT(2) | ||
| 385 | #define EQ_PHASE_FINISHED BIT(3) | ||
| 386 | #define FASE_LT_START_FINISHED BIT(4) | ||
| 387 | #define CLK_RECOVERY_FAILED BIT(5) | ||
| 388 | #define EQ_PHASE_FAILED BIT(6) | ||
| 389 | #define FASE_LT_FAILED BIT(7) | ||
| 390 | |||
| 391 | #define DPTX_HPD_EVENT BIT(0) | ||
| 392 | #define DPTX_TRAINING_EVENT BIT(1) | ||
| 393 | #define HDCP_TX_STATUS_EVENT BIT(4) | ||
| 394 | #define HDCP2_TX_IS_KM_STORED_EVENT BIT(5) | ||
| 395 | #define HDCP2_TX_STORE_KM_EVENT BIT(6) | ||
| 396 | #define HDCP_TX_IS_RECEIVER_ID_VALID_EVENT BIT(7) | ||
| 397 | |||
| 398 | #define TU_SIZE 30 | ||
| 399 | #define CDN_DP_MAX_LINK_RATE DP_LINK_BW_5_4 | ||
| 400 | |||
| 401 | /* audio */ | ||
| 402 | #define AUDIO_PACK_EN BIT(8) | ||
| 403 | #define SAMPLING_FREQ(x) (((x) & 0xf) << 16) | ||
| 404 | #define ORIGINAL_SAMP_FREQ(x) (((x) & 0xf) << 24) | ||
| 405 | #define SYNC_WR_TO_CH_ZERO BIT(1) | ||
| 406 | #define I2S_DEC_START BIT(1) | ||
| 407 | #define AUDIO_SW_RST BIT(0) | ||
| 408 | #define SMPL2PKT_EN BIT(1) | ||
| 409 | #define MAX_NUM_CH(x) (((x) & 0x1f) - 1) | ||
| 410 | #define NUM_OF_I2S_PORTS(x) ((((x) / 2 - 1) & 0x3) << 5) | ||
| 411 | #define AUDIO_TYPE_LPCM (2 << 7) | ||
| 412 | #define CFG_SUB_PCKT_NUM(x) ((((x) - 1) & 0x7) << 11) | ||
| 413 | #define AUDIO_CH_NUM(x) ((((x) - 1) & 0x1f) << 2) | ||
| 414 | #define TRANS_SMPL_WIDTH_16 0 | ||
| 415 | #define TRANS_SMPL_WIDTH_24 BIT(11) | ||
| 416 | #define TRANS_SMPL_WIDTH_32 (2 << 11) | ||
| 417 | #define I2S_DEC_PORT_EN(x) (((x) & 0xf) << 17) | ||
| 418 | #define SPDIF_ENABLE BIT(21) | ||
| 419 | #define SPDIF_AVG_SEL BIT(20) | ||
| 420 | #define SPDIF_JITTER_BYPASS BIT(19) | ||
| 421 | #define SPDIF_FIFO_MID_RANGE(x) (((x) & 0xff) << 11) | ||
| 422 | #define SPDIF_JITTER_THRSH(x) (((x) & 0xff) << 3) | ||
| 423 | #define SPDIF_JITTER_AVG_WIN(x) ((x) & 0x7) | ||
| 424 | |||
| 425 | /* Reference cycles when using lane clock as reference */ | ||
| 426 | #define LANE_REF_CYC 0x8000 | ||
| 427 | |||
| 428 | enum voltage_swing_level { | ||
| 429 | VOLTAGE_LEVEL_0, | ||
| 430 | VOLTAGE_LEVEL_1, | ||
| 431 | VOLTAGE_LEVEL_2, | ||
| 432 | VOLTAGE_LEVEL_3, | ||
| 433 | }; | ||
| 434 | |||
| 435 | enum pre_emphasis_level { | ||
| 436 | PRE_EMPHASIS_LEVEL_0, | ||
| 437 | PRE_EMPHASIS_LEVEL_1, | ||
| 438 | PRE_EMPHASIS_LEVEL_2, | ||
| 439 | PRE_EMPHASIS_LEVEL_3, | ||
| 440 | }; | ||
| 441 | |||
| 442 | enum pattern_set { | ||
| 443 | PTS1 = BIT(0), | ||
| 444 | PTS2 = BIT(1), | ||
| 445 | PTS3 = BIT(2), | ||
| 446 | PTS4 = BIT(3), | ||
| 447 | DP_NONE = BIT(4) | ||
| 448 | }; | ||
| 449 | |||
| 450 | enum vic_color_depth { | ||
| 451 | BCS_6 = 0x1, | ||
| 452 | BCS_8 = 0x2, | ||
| 453 | BCS_10 = 0x4, | ||
| 454 | BCS_12 = 0x8, | ||
| 455 | BCS_16 = 0x10, | ||
| 456 | }; | ||
| 457 | |||
| 458 | enum vic_bt_type { | ||
| 459 | BT_601 = 0x0, | ||
| 460 | BT_709 = 0x1, | ||
| 461 | }; | ||
| 462 | |||
| 463 | void cdn_dp_clock_reset(struct cdn_dp_device *dp); | ||
| 464 | |||
| 465 | void cdn_dp_set_fw_clk(struct cdn_dp_device *dp, u32 clk); | ||
| 466 | int cdn_dp_load_firmware(struct cdn_dp_device *dp, const u32 *i_mem, | ||
| 467 | u32 i_size, const u32 *d_mem, u32 d_size); | ||
| 468 | int cdn_dp_set_firmware_active(struct cdn_dp_device *dp, bool enable); | ||
| 469 | int cdn_dp_set_host_cap(struct cdn_dp_device *dp, u8 lanes, bool flip); | ||
| 470 | int cdn_dp_event_config(struct cdn_dp_device *dp); | ||
| 471 | u32 cdn_dp_get_event(struct cdn_dp_device *dp); | ||
| 472 | int cdn_dp_get_hpd_status(struct cdn_dp_device *dp); | ||
| 473 | int cdn_dp_dpcd_write(struct cdn_dp_device *dp, u32 addr, u8 value); | ||
| 474 | int cdn_dp_dpcd_read(struct cdn_dp_device *dp, u32 addr, u8 *data, u16 len); | ||
| 475 | int cdn_dp_get_edid_block(void *dp, u8 *edid, | ||
| 476 | unsigned int block, size_t length); | ||
| 477 | int cdn_dp_train_link(struct cdn_dp_device *dp); | ||
| 478 | int cdn_dp_set_video_status(struct cdn_dp_device *dp, int active); | ||
| 479 | int cdn_dp_config_video(struct cdn_dp_device *dp); | ||
| 480 | int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio); | ||
| 481 | int cdn_dp_audio_mute(struct cdn_dp_device *dp, bool enable); | ||
| 482 | int cdn_dp_audio_config(struct cdn_dp_device *dp, struct audio_info *audio); | ||
| 483 | #endif /* _CDN_DP_REG_H */ | ||
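Among these declarations, cdn_dp_get_edid_block() takes a void * context and a block/length pair, which matches the read-block callback expected by drm_do_get_edid(). A hedged sketch of how the connector code could wire it up (the surrounding connector plumbing is assumed):

static struct edid *example_read_edid(struct cdn_dp_device *dp,
				      struct drm_connector *connector)
{
	/* drm_do_get_edid() invokes the callback once per 128-byte block */
	return drm_do_get_edid(connector, cdn_dp_get_edid_block, dp);
}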
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 7719b9cd5b74..ccf456938792 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c | |||
| @@ -14,19 +14,19 @@ | |||
| 14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
| 15 | */ | 15 | */ |
| 16 | 16 | ||
| 17 | #include <asm/dma-iommu.h> | ||
| 18 | |||
| 19 | #include <drm/drmP.h> | 17 | #include <drm/drmP.h> |
| 20 | #include <drm/drm_crtc_helper.h> | 18 | #include <drm/drm_crtc_helper.h> |
| 21 | #include <drm/drm_fb_helper.h> | 19 | #include <drm/drm_fb_helper.h> |
| 22 | #include <drm/drm_gem_cma_helper.h> | 20 | #include <drm/drm_gem_cma_helper.h> |
| 23 | #include <drm/drm_of.h> | 21 | #include <drm/drm_of.h> |
| 24 | #include <linux/dma-mapping.h> | 22 | #include <linux/dma-mapping.h> |
| 23 | #include <linux/dma-iommu.h> | ||
| 25 | #include <linux/pm_runtime.h> | 24 | #include <linux/pm_runtime.h> |
| 26 | #include <linux/module.h> | 25 | #include <linux/module.h> |
| 27 | #include <linux/of_graph.h> | 26 | #include <linux/of_graph.h> |
| 28 | #include <linux/component.h> | 27 | #include <linux/component.h> |
| 29 | #include <linux/console.h> | 28 | #include <linux/console.h> |
| 29 | #include <linux/iommu.h> | ||
| 30 | 30 | ||
| 31 | #include "rockchip_drm_drv.h" | 31 | #include "rockchip_drm_drv.h" |
| 32 | #include "rockchip_drm_fb.h" | 32 | #include "rockchip_drm_fb.h" |
| @@ -50,35 +50,73 @@ static struct drm_driver rockchip_drm_driver; | |||
| 50 | int rockchip_drm_dma_attach_device(struct drm_device *drm_dev, | 50 | int rockchip_drm_dma_attach_device(struct drm_device *drm_dev, |
| 51 | struct device *dev) | 51 | struct device *dev) |
| 52 | { | 52 | { |
| 53 | struct dma_iommu_mapping *mapping = drm_dev->dev->archdata.mapping; | 53 | struct rockchip_drm_private *private = drm_dev->dev_private; |
| 54 | int ret; | 54 | int ret; |
| 55 | 55 | ||
| 56 | if (!is_support_iommu) | 56 | if (!is_support_iommu) |
| 57 | return 0; | 57 | return 0; |
| 58 | 58 | ||
| 59 | ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); | 59 | ret = iommu_attach_device(private->domain, dev); |
| 60 | if (ret) | 60 | if (ret) { |
| 61 | dev_err(dev, "Failed to attach iommu device\n"); | ||
| 61 | return ret; | 62 | return ret; |
| 63 | } | ||
| 62 | 64 | ||
| 63 | dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); | 65 | return 0; |
| 64 | |||
| 65 | return arm_iommu_attach_device(dev, mapping); | ||
| 66 | } | 66 | } |
| 67 | 67 | ||
| 68 | void rockchip_drm_dma_detach_device(struct drm_device *drm_dev, | 68 | void rockchip_drm_dma_detach_device(struct drm_device *drm_dev, |
| 69 | struct device *dev) | 69 | struct device *dev) |
| 70 | { | 70 | { |
| 71 | struct rockchip_drm_private *private = drm_dev->dev_private; | ||
| 72 | struct iommu_domain *domain = private->domain; | ||
| 73 | |||
| 71 | if (!is_support_iommu) | 74 | if (!is_support_iommu) |
| 72 | return; | 75 | return; |
| 73 | 76 | ||
| 74 | arm_iommu_detach_device(dev); | 77 | iommu_detach_device(domain, dev); |
| 78 | } | ||
| 79 | |||
| 80 | static int rockchip_drm_init_iommu(struct drm_device *drm_dev) | ||
| 81 | { | ||
| 82 | struct rockchip_drm_private *private = drm_dev->dev_private; | ||
| 83 | struct iommu_domain_geometry *geometry; | ||
| 84 | u64 start, end; | ||
| 85 | |||
| 86 | if (!is_support_iommu) | ||
| 87 | return 0; | ||
| 88 | |||
| 89 | private->domain = iommu_domain_alloc(&platform_bus_type); | ||
| 90 | if (!private->domain) | ||
| 91 | return -ENOMEM; | ||
| 92 | |||
| 93 | geometry = &private->domain->geometry; | ||
| 94 | start = geometry->aperture_start; | ||
| 95 | end = geometry->aperture_end; | ||
| 96 | |||
| 97 | DRM_DEBUG("IOMMU context initialized (aperture: %#llx-%#llx)\n", | ||
| 98 | start, end); | ||
| 99 | drm_mm_init(&private->mm, start, end - start + 1); | ||
| 100 | mutex_init(&private->mm_lock); | ||
| 101 | |||
| 102 | return 0; | ||
| 103 | } | ||
| 104 | |||
| 105 | static void rockchip_iommu_cleanup(struct drm_device *drm_dev) | ||
| 106 | { | ||
| 107 | struct rockchip_drm_private *private = drm_dev->dev_private; | ||
| 108 | |||
| 109 | if (!is_support_iommu) | ||
| 110 | return; | ||
| 111 | |||
| 112 | drm_mm_takedown(&private->mm); | ||
| 113 | iommu_domain_free(private->domain); | ||
| 75 | } | 114 | } |
| 76 | 115 | ||
| 77 | static int rockchip_drm_bind(struct device *dev) | 116 | static int rockchip_drm_bind(struct device *dev) |
| 78 | { | 117 | { |
| 79 | struct drm_device *drm_dev; | 118 | struct drm_device *drm_dev; |
| 80 | struct rockchip_drm_private *private; | 119 | struct rockchip_drm_private *private; |
| 81 | struct dma_iommu_mapping *mapping = NULL; | ||
| 82 | int ret; | 120 | int ret; |
| 83 | 121 | ||
| 84 | drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev); | 122 | drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev); |
| @@ -102,38 +140,14 @@ static int rockchip_drm_bind(struct device *dev) | |||
| 102 | 140 | ||
| 103 | rockchip_drm_mode_config_init(drm_dev); | 141 | rockchip_drm_mode_config_init(drm_dev); |
| 104 | 142 | ||
| 105 | dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), | 143 | ret = rockchip_drm_init_iommu(drm_dev); |
| 106 | GFP_KERNEL); | 144 | if (ret) |
| 107 | if (!dev->dma_parms) { | ||
| 108 | ret = -ENOMEM; | ||
| 109 | goto err_config_cleanup; | 145 | goto err_config_cleanup; |
| 110 | } | ||
| 111 | |||
| 112 | if (is_support_iommu) { | ||
| 113 | /* TODO(djkurtz): fetch the mapping start/size from somewhere */ | ||
| 114 | mapping = arm_iommu_create_mapping(&platform_bus_type, | ||
| 115 | 0x00000000, | ||
| 116 | SZ_2G); | ||
| 117 | if (IS_ERR(mapping)) { | ||
| 118 | ret = PTR_ERR(mapping); | ||
| 119 | goto err_config_cleanup; | ||
| 120 | } | ||
| 121 | |||
| 122 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); | ||
| 123 | if (ret) | ||
| 124 | goto err_release_mapping; | ||
| 125 | |||
| 126 | dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); | ||
| 127 | |||
| 128 | ret = arm_iommu_attach_device(dev, mapping); | ||
| 129 | if (ret) | ||
| 130 | goto err_release_mapping; | ||
| 131 | } | ||
| 132 | 146 | ||
| 133 | /* Try to bind all sub drivers. */ | 147 | /* Try to bind all sub drivers. */ |
| 134 | ret = component_bind_all(dev, drm_dev); | 148 | ret = component_bind_all(dev, drm_dev); |
| 135 | if (ret) | 149 | if (ret) |
| 136 | goto err_detach_device; | 150 | goto err_iommu_cleanup; |
| 137 | 151 | ||
| 138 | /* init kms poll for handling hpd */ | 152 | /* init kms poll for handling hpd */ |
| 139 | drm_kms_helper_poll_init(drm_dev); | 153 | drm_kms_helper_poll_init(drm_dev); |
| @@ -158,8 +172,6 @@ static int rockchip_drm_bind(struct device *dev) | |||
| 158 | if (ret) | 172 | if (ret) |
| 159 | goto err_fbdev_fini; | 173 | goto err_fbdev_fini; |
| 160 | 174 | ||
| 161 | if (is_support_iommu) | ||
| 162 | arm_iommu_release_mapping(mapping); | ||
| 163 | return 0; | 175 | return 0; |
| 164 | err_fbdev_fini: | 176 | err_fbdev_fini: |
| 165 | rockchip_drm_fbdev_fini(drm_dev); | 177 | rockchip_drm_fbdev_fini(drm_dev); |
| @@ -168,12 +180,8 @@ err_vblank_cleanup: | |||
| 168 | err_kms_helper_poll_fini: | 180 | err_kms_helper_poll_fini: |
| 169 | drm_kms_helper_poll_fini(drm_dev); | 181 | drm_kms_helper_poll_fini(drm_dev); |
| 170 | component_unbind_all(dev, drm_dev); | 182 | component_unbind_all(dev, drm_dev); |
| 171 | err_detach_device: | 183 | err_iommu_cleanup: |
| 172 | if (is_support_iommu) | 184 | rockchip_iommu_cleanup(drm_dev); |
| 173 | arm_iommu_detach_device(dev); | ||
| 174 | err_release_mapping: | ||
| 175 | if (is_support_iommu) | ||
| 176 | arm_iommu_release_mapping(mapping); | ||
| 177 | err_config_cleanup: | 185 | err_config_cleanup: |
| 178 | drm_mode_config_cleanup(drm_dev); | 186 | drm_mode_config_cleanup(drm_dev); |
| 179 | drm_dev->dev_private = NULL; | 187 | drm_dev->dev_private = NULL; |
| @@ -190,8 +198,7 @@ static void rockchip_drm_unbind(struct device *dev) | |||
| 190 | drm_vblank_cleanup(drm_dev); | 198 | drm_vblank_cleanup(drm_dev); |
| 191 | drm_kms_helper_poll_fini(drm_dev); | 199 | drm_kms_helper_poll_fini(drm_dev); |
| 192 | component_unbind_all(dev, drm_dev); | 200 | component_unbind_all(dev, drm_dev); |
| 193 | if (is_support_iommu) | 201 | rockchip_iommu_cleanup(drm_dev); |
| 194 | arm_iommu_detach_device(dev); | ||
| 195 | drm_mode_config_cleanup(drm_dev); | 202 | drm_mode_config_cleanup(drm_dev); |
| 196 | drm_dev->dev_private = NULL; | 203 | drm_dev->dev_private = NULL; |
| 197 | drm_dev_unregister(drm_dev); | 204 | drm_dev_unregister(drm_dev); |
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h index 9f9bc959b108..8aca219ec4c8 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h | |||
| @@ -30,6 +30,7 @@ | |||
| 30 | 30 | ||
| 31 | struct drm_device; | 31 | struct drm_device; |
| 32 | struct drm_connector; | 32 | struct drm_connector; |
| 33 | struct iommu_domain; | ||
| 33 | 34 | ||
| 34 | struct rockchip_crtc_state { | 35 | struct rockchip_crtc_state { |
| 35 | struct drm_crtc_state base; | 36 | struct drm_crtc_state base; |
| @@ -49,7 +50,10 @@ struct rockchip_drm_private { | |||
| 49 | struct drm_fb_helper fbdev_helper; | 50 | struct drm_fb_helper fbdev_helper; |
| 50 | struct drm_gem_object *fbdev_bo; | 51 | struct drm_gem_object *fbdev_bo; |
| 51 | struct drm_atomic_state *state; | 52 | struct drm_atomic_state *state; |
| 52 | 53 | struct iommu_domain *domain; | |
| 54 | /* protects drm_mm against concurrent access */ | ||
| 55 | struct mutex mm_lock; | ||
| 56 | struct drm_mm mm; | ||
| 53 | struct list_head psr_list; | 57 | struct list_head psr_list; |
| 54 | spinlock_t psr_list_lock; | 58 | spinlock_t psr_list_lock; |
| 55 | }; | 59 | }; |
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c index 200b35b6b389..81f9548672b0 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c | |||
| @@ -213,7 +213,7 @@ rockchip_drm_framebuffer_init(struct drm_device *dev, | |||
| 213 | 213 | ||
| 214 | rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1); | 214 | rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1); |
| 215 | if (IS_ERR(rockchip_fb)) | 215 | if (IS_ERR(rockchip_fb)) |
| 216 | return NULL; | 216 | return ERR_CAST(rockchip_fb); |
| 217 | 217 | ||
| 218 | return &rockchip_fb->fb; | 218 | return &rockchip_fb->fb; |
| 219 | } | 219 | } |
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index b70f9423379c..df9e57064f19 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c | |||
| @@ -16,11 +16,146 @@ | |||
| 16 | #include <drm/drmP.h> | 16 | #include <drm/drmP.h> |
| 17 | #include <drm/drm_gem.h> | 17 | #include <drm/drm_gem.h> |
| 18 | #include <drm/drm_vma_manager.h> | 18 | #include <drm/drm_vma_manager.h> |
| 19 | #include <linux/iommu.h> | ||
| 19 | 20 | ||
| 20 | #include "rockchip_drm_drv.h" | 21 | #include "rockchip_drm_drv.h" |
| 21 | #include "rockchip_drm_gem.h" | 22 | #include "rockchip_drm_gem.h" |
| 22 | 23 | ||
| 23 | static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj, | 24 | static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj) |
| 25 | { | ||
| 26 | struct drm_device *drm = rk_obj->base.dev; | ||
| 27 | struct rockchip_drm_private *private = drm->dev_private; | ||
| 28 | int prot = IOMMU_READ | IOMMU_WRITE; | ||
| 29 | ssize_t ret; | ||
| 30 | |||
| 31 | mutex_lock(&private->mm_lock); | ||
| 32 | |||
| 33 | ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm, | ||
| 34 | rk_obj->base.size, PAGE_SIZE, | ||
| 35 | 0, 0); | ||
| 36 | |||
| 37 | mutex_unlock(&private->mm_lock); | ||
| 38 | if (ret < 0) { | ||
| 39 | DRM_ERROR("out of I/O virtual memory: %zd\n", ret); | ||
| 40 | return ret; | ||
| 41 | } | ||
| 42 | |||
| 43 | rk_obj->dma_addr = rk_obj->mm.start; | ||
| 44 | |||
| 45 | ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl, | ||
| 46 | rk_obj->sgt->nents, prot); | ||
| 47 | if (ret < rk_obj->base.size) { | ||
| 48 | DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n", | ||
| 49 | ret, rk_obj->base.size); | ||
| 50 | ret = -ENOMEM; | ||
| 51 | goto err_remove_node; | ||
| 52 | } | ||
| 53 | |||
| 54 | rk_obj->size = ret; | ||
| 55 | |||
| 56 | return 0; | ||
| 57 | |||
| 58 | err_remove_node: | ||
| 59 | drm_mm_remove_node(&rk_obj->mm); | ||
| 60 | |||
| 61 | return ret; | ||
| 62 | } | ||
| 63 | |||
| 64 | static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj) | ||
| 65 | { | ||
| 66 | struct drm_device *drm = rk_obj->base.dev; | ||
| 67 | struct rockchip_drm_private *private = drm->dev_private; | ||
| 68 | |||
| 69 | iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size); | ||
| 70 | |||
| 71 | mutex_lock(&private->mm_lock); | ||
| 72 | |||
| 73 | drm_mm_remove_node(&rk_obj->mm); | ||
| 74 | |||
| 75 | mutex_unlock(&private->mm_lock); | ||
| 76 | |||
| 77 | return 0; | ||
| 78 | } | ||
| 79 | |||
| 80 | static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj) | ||
| 81 | { | ||
| 82 | struct drm_device *drm = rk_obj->base.dev; | ||
| 83 | int ret, i; | ||
| 84 | struct scatterlist *s; | ||
| 85 | |||
| 86 | rk_obj->pages = drm_gem_get_pages(&rk_obj->base); | ||
| 87 | if (IS_ERR(rk_obj->pages)) | ||
| 88 | return PTR_ERR(rk_obj->pages); | ||
| 89 | |||
| 90 | rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT; | ||
| 91 | |||
| 92 | rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages); | ||
| 93 | if (IS_ERR(rk_obj->sgt)) { | ||
| 94 | ret = PTR_ERR(rk_obj->sgt); | ||
| 95 | goto err_put_pages; | ||
| 96 | } | ||
| 97 | |||
| 98 | /* | ||
| 99 | * Fake up the SG table so that dma_sync_sg_for_device() can be used | ||
| 100 | * to flush the pages associated with it. | ||
| 101 | * | ||
| 102 | * TODO: Replace this by drm_clflush_sg() once it can be implemented | ||
| 103 | * without relying on symbols that are not exported. | ||
| 104 | */ | ||
| 105 | for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i) | ||
| 106 | sg_dma_address(s) = sg_phys(s); | ||
| 107 | |||
| 108 | dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents, | ||
| 109 | DMA_TO_DEVICE); | ||
| 110 | |||
| 111 | return 0; | ||
| 112 | |||
| 113 | err_put_pages: | ||
| 114 | drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false); | ||
| 115 | return ret; | ||
| 116 | } | ||
| 117 | |||
| 118 | static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj) | ||
| 119 | { | ||
| 120 | sg_free_table(rk_obj->sgt); | ||
| 121 | kfree(rk_obj->sgt); | ||
| 122 | drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true); | ||
| 123 | } | ||
| 124 | |||
| 125 | static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj, | ||
| 126 | bool alloc_kmap) | ||
| 127 | { | ||
| 128 | int ret; | ||
| 129 | |||
| 130 | ret = rockchip_gem_get_pages(rk_obj); | ||
| 131 | if (ret < 0) | ||
| 132 | return ret; | ||
| 133 | |||
| 134 | ret = rockchip_gem_iommu_map(rk_obj); | ||
| 135 | if (ret < 0) | ||
| 136 | goto err_free; | ||
| 137 | |||
| 138 | if (alloc_kmap) { | ||
| 139 | rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP, | ||
| 140 | pgprot_writecombine(PAGE_KERNEL)); | ||
| 141 | if (!rk_obj->kvaddr) { | ||
| 142 | DRM_ERROR("failed to vmap() buffer\n"); | ||
| 143 | ret = -ENOMEM; | ||
| 144 | goto err_unmap; | ||
| 145 | } | ||
| 146 | } | ||
| 147 | |||
| 148 | return 0; | ||
| 149 | |||
| 150 | err_unmap: | ||
| 151 | rockchip_gem_iommu_unmap(rk_obj); | ||
| 152 | err_free: | ||
| 153 | rockchip_gem_put_pages(rk_obj); | ||
| 154 | |||
| 155 | return ret; | ||
| 156 | } | ||
| 157 | |||
| 158 | static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj, | ||
| 24 | bool alloc_kmap) | 159 | bool alloc_kmap) |
| 25 | { | 160 | { |
| 26 | struct drm_gem_object *obj = &rk_obj->base; | 161 | struct drm_gem_object *obj = &rk_obj->base; |
| @@ -42,7 +177,27 @@ static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj, | |||
| 42 | return 0; | 177 | return 0; |
| 43 | } | 178 | } |
| 44 | 179 | ||
| 45 | static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj) | 180 | static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj, |
| 181 | bool alloc_kmap) | ||
| 182 | { | ||
| 183 | struct drm_gem_object *obj = &rk_obj->base; | ||
| 184 | struct drm_device *drm = obj->dev; | ||
| 185 | struct rockchip_drm_private *private = drm->dev_private; | ||
| 186 | |||
| 187 | if (private->domain) | ||
| 188 | return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap); | ||
| 189 | else | ||
| 190 | return rockchip_gem_alloc_dma(rk_obj, alloc_kmap); | ||
| 191 | } | ||
| 192 | |||
| 193 | static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj) | ||
| 194 | { | ||
| 195 | vunmap(rk_obj->kvaddr); | ||
| 196 | rockchip_gem_iommu_unmap(rk_obj); | ||
| 197 | rockchip_gem_put_pages(rk_obj); | ||
| 198 | } | ||
| 199 | |||
| 200 | static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj) | ||
| 46 | { | 201 | { |
| 47 | struct drm_gem_object *obj = &rk_obj->base; | 202 | struct drm_gem_object *obj = &rk_obj->base; |
| 48 | struct drm_device *drm = obj->dev; | 203 | struct drm_device *drm = obj->dev; |
| @@ -51,23 +206,68 @@ static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj) | |||
| 51 | rk_obj->dma_attrs); | 206 | rk_obj->dma_attrs); |
| 52 | } | 207 | } |
| 53 | 208 | ||
| 54 | static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, | 209 | static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj) |
| 55 | struct vm_area_struct *vma) | 210 | { |
| 211 | if (rk_obj->pages) | ||
| 212 | rockchip_gem_free_iommu(rk_obj); | ||
| 213 | else | ||
| 214 | rockchip_gem_free_dma(rk_obj); | ||
| 215 | } | ||
| 56 | 216 | ||
| 217 | static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj, | ||
| 218 | struct vm_area_struct *vma) | ||
| 57 | { | 219 | { |
| 220 | struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); | ||
| 221 | unsigned int i, count = obj->size >> PAGE_SHIFT; | ||
| 222 | unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | ||
| 223 | unsigned long uaddr = vma->vm_start; | ||
| 224 | unsigned long offset = vma->vm_pgoff; | ||
| 225 | unsigned long end = user_count + offset; | ||
| 58 | int ret; | 226 | int ret; |
| 227 | |||
| 228 | if (user_count == 0) | ||
| 229 | return -ENXIO; | ||
| 230 | if (end > count) | ||
| 231 | return -ENXIO; | ||
| 232 | |||
| 233 | for (i = offset; i < end; i++) { | ||
| 234 | ret = vm_insert_page(vma, uaddr, rk_obj->pages[i]); | ||
| 235 | if (ret) | ||
| 236 | return ret; | ||
| 237 | uaddr += PAGE_SIZE; | ||
| 238 | } | ||
| 239 | |||
| 240 | return 0; | ||
| 241 | } | ||
| 242 | |||
| 243 | static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj, | ||
| 244 | struct vm_area_struct *vma) | ||
| 245 | { | ||
| 59 | struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); | 246 | struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); |
| 60 | struct drm_device *drm = obj->dev; | 247 | struct drm_device *drm = obj->dev; |
| 61 | 248 | ||
| 249 | return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, | ||
| 250 | obj->size, rk_obj->dma_attrs); | ||
| 251 | } | ||
| 252 | |||
| 253 | static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, | ||
| 254 | struct vm_area_struct *vma) | ||
| 255 | { | ||
| 256 | int ret; | ||
| 257 | struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); | ||
| 258 | |||
| 62 | /* | 259 | /* |
| 63 | * dma_alloc_attrs() allocated a struct page table for rk_obj, so clear | 260 | * We allocated a struct page table for rk_obj, so clear |
| 64 | * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). | 261 | * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). |
| 65 | */ | 262 | */ |
| 66 | vma->vm_flags &= ~VM_PFNMAP; | 263 | vma->vm_flags &= ~VM_PFNMAP; |
| 67 | vma->vm_pgoff = 0; | 264 | vma->vm_pgoff = 0; |
| 68 | 265 | ||
| 69 | ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, | 266 | if (rk_obj->pages) |
| 70 | obj->size, rk_obj->dma_attrs); | 267 | ret = rockchip_drm_gem_object_mmap_iommu(obj, vma); |
| 268 | else | ||
| 269 | ret = rockchip_drm_gem_object_mmap_dma(obj, vma); | ||
| 270 | |||
| 71 | if (ret) | 271 | if (ret) |
| 72 | drm_gem_vm_close(vma); | 272 | drm_gem_vm_close(vma); |
| 73 | 273 | ||
| @@ -101,6 +301,12 @@ int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma) | |||
| 101 | return rockchip_drm_gem_object_mmap(obj, vma); | 301 | return rockchip_drm_gem_object_mmap(obj, vma); |
| 102 | } | 302 | } |
| 103 | 303 | ||
| 304 | static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj) | ||
| 305 | { | ||
| 306 | drm_gem_object_release(&rk_obj->base); | ||
| 307 | kfree(rk_obj); | ||
| 308 | } | ||
| 309 | |||
| 104 | struct rockchip_gem_object * | 310 | struct rockchip_gem_object * |
| 105 | rockchip_gem_create_object(struct drm_device *drm, unsigned int size, | 311 | rockchip_gem_create_object(struct drm_device *drm, unsigned int size, |
| 106 | bool alloc_kmap) | 312 | bool alloc_kmap) |
| @@ -117,7 +323,7 @@ struct rockchip_gem_object * | |||
| 117 | 323 | ||
| 118 | obj = &rk_obj->base; | 324 | obj = &rk_obj->base; |
| 119 | 325 | ||
| 120 | drm_gem_private_object_init(drm, obj, size); | 326 | drm_gem_object_init(drm, obj, size); |
| 121 | 327 | ||
| 122 | ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap); | 328 | ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap); |
| 123 | if (ret) | 329 | if (ret) |
| @@ -126,7 +332,7 @@ struct rockchip_gem_object * | |||
| 126 | return rk_obj; | 332 | return rk_obj; |
| 127 | 333 | ||
| 128 | err_free_rk_obj: | 334 | err_free_rk_obj: |
| 129 | kfree(rk_obj); | 335 | rockchip_gem_release_object(rk_obj); |
| 130 | return ERR_PTR(ret); | 336 | return ERR_PTR(ret); |
| 131 | } | 337 | } |
| 132 | 338 | ||
| @@ -138,13 +344,11 @@ void rockchip_gem_free_object(struct drm_gem_object *obj) | |||
| 138 | { | 344 | { |
| 139 | struct rockchip_gem_object *rk_obj; | 345 | struct rockchip_gem_object *rk_obj; |
| 140 | 346 | ||
| 141 | drm_gem_free_mmap_offset(obj); | ||
| 142 | |||
| 143 | rk_obj = to_rockchip_obj(obj); | 347 | rk_obj = to_rockchip_obj(obj); |
| 144 | 348 | ||
| 145 | rockchip_gem_free_buf(rk_obj); | 349 | rockchip_gem_free_buf(rk_obj); |
| 146 | 350 | ||
| 147 | kfree(rk_obj); | 351 | rockchip_gem_release_object(rk_obj); |
| 148 | } | 352 | } |
| 149 | 353 | ||
| 150 | /* | 354 | /* |
| @@ -253,6 +457,9 @@ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj) | |||
| 253 | struct sg_table *sgt; | 457 | struct sg_table *sgt; |
| 254 | int ret; | 458 | int ret; |
| 255 | 459 | ||
| 460 | if (rk_obj->pages) | ||
| 461 | return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages); | ||
| 462 | |||
| 256 | sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); | 463 | sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); |
| 257 | if (!sgt) | 464 | if (!sgt) |
| 258 | return ERR_PTR(-ENOMEM); | 465 | return ERR_PTR(-ENOMEM); |
| @@ -273,6 +480,10 @@ void *rockchip_gem_prime_vmap(struct drm_gem_object *obj) | |||
| 273 | { | 480 | { |
| 274 | struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); | 481 | struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); |
| 275 | 482 | ||
| 483 | if (rk_obj->pages) | ||
| 484 | return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP, | ||
| 485 | pgprot_writecombine(PAGE_KERNEL)); | ||
| 486 | |||
| 276 | if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING) | 487 | if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING) |
| 277 | return NULL; | 488 | return NULL; |
| 278 | 489 | ||
| @@ -281,5 +492,12 @@ void *rockchip_gem_prime_vmap(struct drm_gem_object *obj) | |||
| 281 | 492 | ||
| 282 | void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) | 493 | void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) |
| 283 | { | 494 | { |
| 284 | /* Nothing to do */ | 495 | struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); |
| 496 | |||
| 497 | if (rk_obj->pages) { | ||
| 498 | vunmap(vaddr); | ||
| 499 | return; | ||
| 500 | } | ||
| 501 | |||
| 502 | /* Nothing to do if allocated by DMA mapping API. */ | ||
| 285 | } | 503 | } |
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h index 18b3488db4ec..3f6ea4d18a5c 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h | |||
| @@ -23,7 +23,15 @@ struct rockchip_gem_object { | |||
| 23 | 23 | ||
| 24 | void *kvaddr; | 24 | void *kvaddr; |
| 25 | dma_addr_t dma_addr; | 25 | dma_addr_t dma_addr; |
| 26 | /* Used when IOMMU is disabled */ | ||
| 26 | unsigned long dma_attrs; | 27 | unsigned long dma_attrs; |
| 28 | |||
| 29 | /* Used when IOMMU is enabled */ | ||
| 30 | struct drm_mm_node mm; | ||
| 31 | unsigned long num_pages; | ||
| 32 | struct page **pages; | ||
| 33 | struct sg_table *sgt; | ||
| 34 | size_t size; | ||
| 27 | }; | 35 | }; |
| 28 | 36 | ||
| 29 | struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj); | 37 | struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj); |
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index ffee8d8c3794..7c2e7c481333 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c | |||
| @@ -531,6 +531,8 @@ static int vop_enable(struct drm_crtc *crtc) | |||
| 531 | } | 531 | } |
| 532 | 532 | ||
| 533 | memcpy(vop->regs, vop->regsbak, vop->len); | 533 | memcpy(vop->regs, vop->regsbak, vop->len); |
| 534 | vop_cfg_done(vop); | ||
| 535 | |||
| 534 | /* | 536 | /* |
| 535 | * At here, vop clock & iommu is enable, R/W vop regs would be safe. | 537 | * At here, vop clock & iommu is enable, R/W vop regs would be safe. |
| 536 | */ | 538 | */ |
| @@ -582,6 +584,8 @@ static void vop_crtc_disable(struct drm_crtc *crtc) | |||
| 582 | spin_unlock(&vop->reg_lock); | 584 | spin_unlock(&vop->reg_lock); |
| 583 | } | 585 | } |
| 584 | 586 | ||
| 587 | vop_cfg_done(vop); | ||
| 588 | |||
| 585 | drm_crtc_vblank_off(crtc); | 589 | drm_crtc_vblank_off(crtc); |
| 586 | 590 | ||
| 587 | /* | 591 | /* |
| @@ -927,9 +931,11 @@ static void vop_crtc_enable(struct drm_crtc *crtc) | |||
| 927 | vop_dsp_hold_valid_irq_disable(vop); | 931 | vop_dsp_hold_valid_irq_disable(vop); |
| 928 | } | 932 | } |
| 929 | 933 | ||
| 930 | pin_pol = 0x8; | 934 | pin_pol = BIT(DCLK_INVERT); |
| 931 | pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1; | 935 | pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? |
| 932 | pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1); | 936 | 0 : BIT(HSYNC_POSITIVE); |
| 937 | pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? | ||
| 938 | 0 : BIT(VSYNC_POSITIVE); | ||
| 933 | VOP_CTRL_SET(vop, pin_pol, pin_pol); | 939 | VOP_CTRL_SET(vop, pin_pol, pin_pol); |
| 934 | 940 | ||
| 935 | switch (s->output_type) { | 941 | switch (s->output_type) { |
| @@ -949,6 +955,11 @@ static void vop_crtc_enable(struct drm_crtc *crtc) | |||
| 949 | VOP_CTRL_SET(vop, mipi_pin_pol, pin_pol); | 955 | VOP_CTRL_SET(vop, mipi_pin_pol, pin_pol); |
| 950 | VOP_CTRL_SET(vop, mipi_en, 1); | 956 | VOP_CTRL_SET(vop, mipi_en, 1); |
| 951 | break; | 957 | break; |
| 958 | case DRM_MODE_CONNECTOR_DisplayPort: | ||
| 959 | pin_pol &= ~BIT(DCLK_INVERT); | ||
| 960 | VOP_CTRL_SET(vop, dp_pin_pol, pin_pol); | ||
| 961 | VOP_CTRL_SET(vop, dp_en, 1); | ||
| 962 | break; | ||
| 952 | default: | 963 | default: |
| 953 | DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n", | 964 | DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n", |
| 954 | s->output_type); | 965 | s->output_type); |
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h index 1dbc52615257..5a4faa85dbd2 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h | |||
| @@ -45,6 +45,7 @@ struct vop_ctrl { | |||
| 45 | struct vop_reg edp_en; | 45 | struct vop_reg edp_en; |
| 46 | struct vop_reg hdmi_en; | 46 | struct vop_reg hdmi_en; |
| 47 | struct vop_reg mipi_en; | 47 | struct vop_reg mipi_en; |
| 48 | struct vop_reg dp_en; | ||
| 48 | struct vop_reg out_mode; | 49 | struct vop_reg out_mode; |
| 49 | struct vop_reg dither_down; | 50 | struct vop_reg dither_down; |
| 50 | struct vop_reg dither_up; | 51 | struct vop_reg dither_up; |
| @@ -53,6 +54,7 @@ struct vop_ctrl { | |||
| 53 | struct vop_reg hdmi_pin_pol; | 54 | struct vop_reg hdmi_pin_pol; |
| 54 | struct vop_reg edp_pin_pol; | 55 | struct vop_reg edp_pin_pol; |
| 55 | struct vop_reg mipi_pin_pol; | 56 | struct vop_reg mipi_pin_pol; |
| 57 | struct vop_reg dp_pin_pol; | ||
| 56 | 58 | ||
| 57 | struct vop_reg htotal_pw; | 59 | struct vop_reg htotal_pw; |
| 58 | struct vop_reg hact_st_end; | 60 | struct vop_reg hact_st_end; |
| @@ -244,6 +246,13 @@ enum scale_down_mode { | |||
| 244 | SCALE_DOWN_AVG = 0x1 | 246 | SCALE_DOWN_AVG = 0x1 |
| 245 | }; | 247 | }; |
| 246 | 248 | ||
| 249 | enum vop_pol { | ||
| 250 | HSYNC_POSITIVE = 0, | ||
| 251 | VSYNC_POSITIVE = 1, | ||
| 252 | DEN_NEGATIVE = 2, | ||
| 253 | DCLK_INVERT = 3 | ||
| 254 | }; | ||
| 255 | |||
| 247 | #define FRAC_16_16(mult, div) (((mult) << 16) / (div)) | 256 | #define FRAC_16_16(mult, div) (((mult) << 16) / (div)) |
| 248 | #define SCL_FT_DEFAULT_FIXPOINT_SHIFT 12 | 257 | #define SCL_FT_DEFAULT_FIXPOINT_SHIFT 12 |
| 249 | #define SCL_MAX_VSKIPLINES 4 | 258 | #define SCL_MAX_VSKIPLINES 4 |
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c index 35c51f3402f2..91fbc7b52147 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c | |||
| @@ -284,6 +284,7 @@ static const struct vop_data rk3288_vop = { | |||
| 284 | static const struct vop_ctrl rk3399_ctrl_data = { | 284 | static const struct vop_ctrl rk3399_ctrl_data = { |
| 285 | .standby = VOP_REG(RK3399_SYS_CTRL, 0x1, 22), | 285 | .standby = VOP_REG(RK3399_SYS_CTRL, 0x1, 22), |
| 286 | .gate_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 23), | 286 | .gate_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 23), |
| 287 | .dp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 11), | ||
| 287 | .rgb_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 12), | 288 | .rgb_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 12), |
| 288 | .hdmi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 13), | 289 | .hdmi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 13), |
| 289 | .edp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 14), | 290 | .edp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 14), |
| @@ -293,6 +294,7 @@ static const struct vop_ctrl rk3399_ctrl_data = { | |||
| 293 | .data_blank = VOP_REG(RK3399_DSP_CTRL0, 0x1, 19), | 294 | .data_blank = VOP_REG(RK3399_DSP_CTRL0, 0x1, 19), |
| 294 | .out_mode = VOP_REG(RK3399_DSP_CTRL0, 0xf, 0), | 295 | .out_mode = VOP_REG(RK3399_DSP_CTRL0, 0xf, 0), |
| 295 | .rgb_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 16), | 296 | .rgb_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 16), |
| 297 | .dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 16), | ||
| 296 | .hdmi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 20), | 298 | .hdmi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 20), |
| 297 | .edp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 24), | 299 | .edp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 24), |
| 298 | .mipi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 28), | 300 | .mipi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 28), |
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c index e992bed98dcb..d45a4335df5d 100644 --- a/drivers/gpu/drm/sti/sti_crtc.c +++ b/drivers/gpu/drm/sti/sti_crtc.c | |||
| @@ -134,21 +134,6 @@ sti_crtc_mode_set_nofb(struct drm_crtc *crtc) | |||
| 134 | sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode); | 134 | sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode); |
| 135 | } | 135 | } |
| 136 | 136 | ||
| 137 | static void sti_crtc_atomic_begin(struct drm_crtc *crtc, | ||
| 138 | struct drm_crtc_state *old_crtc_state) | ||
| 139 | { | ||
| 140 | struct sti_mixer *mixer = to_sti_mixer(crtc); | ||
| 141 | |||
| 142 | if (crtc->state->event) { | ||
| 143 | crtc->state->event->pipe = drm_crtc_index(crtc); | ||
| 144 | |||
| 145 | WARN_ON(drm_crtc_vblank_get(crtc) != 0); | ||
| 146 | |||
| 147 | mixer->pending_event = crtc->state->event; | ||
| 148 | crtc->state->event = NULL; | ||
| 149 | } | ||
| 150 | } | ||
| 151 | |||
| 152 | static void sti_crtc_atomic_flush(struct drm_crtc *crtc, | 137 | static void sti_crtc_atomic_flush(struct drm_crtc *crtc, |
| 153 | struct drm_crtc_state *old_crtc_state) | 138 | struct drm_crtc_state *old_crtc_state) |
| 154 | { | 139 | { |
| @@ -156,6 +141,8 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc, | |||
| 156 | struct sti_mixer *mixer = to_sti_mixer(crtc); | 141 | struct sti_mixer *mixer = to_sti_mixer(crtc); |
| 157 | struct sti_compositor *compo = dev_get_drvdata(mixer->dev); | 142 | struct sti_compositor *compo = dev_get_drvdata(mixer->dev); |
| 158 | struct drm_plane *p; | 143 | struct drm_plane *p; |
| 144 | struct drm_pending_vblank_event *event; | ||
| 145 | unsigned long flags; | ||
| 159 | 146 | ||
| 160 | DRM_DEBUG_DRIVER("\n"); | 147 | DRM_DEBUG_DRIVER("\n"); |
| 161 | 148 | ||
| @@ -220,13 +207,24 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc, | |||
| 220 | break; | 207 | break; |
| 221 | } | 208 | } |
| 222 | } | 209 | } |
| 210 | |||
| 211 | event = crtc->state->event; | ||
| 212 | if (event) { | ||
| 213 | crtc->state->event = NULL; | ||
| 214 | |||
| 215 | spin_lock_irqsave(&crtc->dev->event_lock, flags); | ||
| 216 | if (drm_crtc_vblank_get(crtc) == 0) | ||
| 217 | drm_crtc_arm_vblank_event(crtc, event); | ||
| 218 | else | ||
| 219 | drm_crtc_send_vblank_event(crtc, event); | ||
| 220 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); | ||
| 221 | } | ||
| 223 | } | 222 | } |
| 224 | 223 | ||
| 225 | static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { | 224 | static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { |
| 226 | .enable = sti_crtc_enable, | 225 | .enable = sti_crtc_enable, |
| 227 | .disable = sti_crtc_disabling, | 226 | .disable = sti_crtc_disabling, |
| 228 | .mode_set_nofb = sti_crtc_mode_set_nofb, | 227 | .mode_set_nofb = sti_crtc_mode_set_nofb, |
| 229 | .atomic_begin = sti_crtc_atomic_begin, | ||
| 230 | .atomic_flush = sti_crtc_atomic_flush, | 228 | .atomic_flush = sti_crtc_atomic_flush, |
| 231 | }; | 229 | }; |
| 232 | 230 | ||
| @@ -250,7 +248,6 @@ int sti_crtc_vblank_cb(struct notifier_block *nb, | |||
| 250 | struct sti_compositor *compo; | 248 | struct sti_compositor *compo; |
| 251 | struct drm_crtc *crtc = data; | 249 | struct drm_crtc *crtc = data; |
| 252 | struct sti_mixer *mixer; | 250 | struct sti_mixer *mixer; |
| 253 | unsigned long flags; | ||
| 254 | struct sti_private *priv; | 251 | struct sti_private *priv; |
| 255 | unsigned int pipe; | 252 | unsigned int pipe; |
| 256 | 253 | ||
| @@ -267,14 +264,6 @@ int sti_crtc_vblank_cb(struct notifier_block *nb, | |||
| 267 | 264 | ||
| 268 | drm_crtc_handle_vblank(crtc); | 265 | drm_crtc_handle_vblank(crtc); |
| 269 | 266 | ||
| 270 | spin_lock_irqsave(&crtc->dev->event_lock, flags); | ||
| 271 | if (mixer->pending_event) { | ||
| 272 | drm_crtc_send_vblank_event(crtc, mixer->pending_event); | ||
| 273 | drm_crtc_vblank_put(crtc); | ||
| 274 | mixer->pending_event = NULL; | ||
| 275 | } | ||
| 276 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); | ||
| 277 | |||
| 278 | if (mixer->status == STI_MIXER_DISABLING) { | 267 | if (mixer->status == STI_MIXER_DISABLING) { |
| 279 | struct drm_plane *p; | 268 | struct drm_plane *p; |
| 280 | 269 | ||
| @@ -317,19 +306,12 @@ void sti_crtc_disable_vblank(struct drm_device *drm_dev, unsigned int pipe) | |||
| 317 | struct sti_private *priv = drm_dev->dev_private; | 306 | struct sti_private *priv = drm_dev->dev_private; |
| 318 | struct sti_compositor *compo = priv->compo; | 307 | struct sti_compositor *compo = priv->compo; |
| 319 | struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb[pipe]; | 308 | struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb[pipe]; |
| 320 | struct drm_crtc *crtc = &compo->mixer[pipe]->drm_crtc; | ||
| 321 | struct sti_vtg *vtg = compo->vtg[pipe]; | 309 | struct sti_vtg *vtg = compo->vtg[pipe]; |
| 322 | 310 | ||
| 323 | DRM_DEBUG_DRIVER("\n"); | 311 | DRM_DEBUG_DRIVER("\n"); |
| 324 | 312 | ||
| 325 | if (sti_vtg_unregister_client(vtg, vtg_vblank_nb)) | 313 | if (sti_vtg_unregister_client(vtg, vtg_vblank_nb)) |
| 326 | DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n"); | 314 | DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n"); |
| 327 | |||
| 328 | /* free the resources of the pending requests */ | ||
| 329 | if (compo->mixer[pipe]->pending_event) { | ||
| 330 | drm_crtc_vblank_put(crtc); | ||
| 331 | compo->mixer[pipe]->pending_event = NULL; | ||
| 332 | } | ||
| 333 | } | 315 | } |
| 334 | 316 | ||
| 335 | static int sti_crtc_late_register(struct drm_crtc *crtc) | 317 | static int sti_crtc_late_register(struct drm_crtc *crtc) |
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index d4b771d2d10d..3114f125b863 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c | |||
| @@ -58,7 +58,9 @@ static int sti_drm_fps_set(void *data, u64 val) | |||
| 58 | list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) { | 58 | list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) { |
| 59 | struct sti_plane *plane = to_sti_plane(p); | 59 | struct sti_plane *plane = to_sti_plane(p); |
| 60 | 60 | ||
| 61 | memset(&plane->fps_info, 0, sizeof(plane->fps_info)); | ||
| 61 | plane->fps_info.output = (val >> i) & 1; | 62 | plane->fps_info.output = (val >> i) & 1; |
| 63 | |||
| 62 | i++; | 64 | i++; |
| 63 | } | 65 | } |
| 64 | 66 | ||
| @@ -115,52 +117,6 @@ err: | |||
| 115 | return ret; | 117 | return ret; |
| 116 | } | 118 | } |
| 117 | 119 | ||
| 118 | static void sti_atomic_schedule(struct sti_private *private, | ||
| 119 | struct drm_atomic_state *state) | ||
| 120 | { | ||
| 121 | private->commit.state = state; | ||
| 122 | schedule_work(&private->commit.work); | ||
| 123 | } | ||
| 124 | |||
| 125 | static void sti_atomic_complete(struct sti_private *private, | ||
| 126 | struct drm_atomic_state *state) | ||
| 127 | { | ||
| 128 | struct drm_device *drm = private->drm_dev; | ||
| 129 | |||
| 130 | /* | ||
| 131 | * Everything below can be run asynchronously without the need to grab | ||
| 132 | * any modeset locks at all under one condition: It must be guaranteed | ||
| 133 | * that the asynchronous work has either been cancelled (if the driver | ||
| 134 | * supports it, which at least requires that the framebuffers get | ||
| 135 | * cleaned up with drm_atomic_helper_cleanup_planes()) or completed | ||
| 136 | * before the new state gets committed on the software side with | ||
| 137 | * drm_atomic_helper_swap_state(). | ||
| 138 | * | ||
| 139 | * This scheme allows new atomic state updates to be prepared and | ||
| 140 | * checked in parallel to the asynchronous completion of the previous | ||
| 141 | * update. Which is important since compositors need to figure out the | ||
| 142 | * composition of the next frame right after having submitted the | ||
| 143 | * current layout. | ||
| 144 | */ | ||
| 145 | |||
| 146 | drm_atomic_helper_commit_modeset_disables(drm, state); | ||
| 147 | drm_atomic_helper_commit_planes(drm, state, 0); | ||
| 148 | drm_atomic_helper_commit_modeset_enables(drm, state); | ||
| 149 | |||
| 150 | drm_atomic_helper_wait_for_vblanks(drm, state); | ||
| 151 | |||
| 152 | drm_atomic_helper_cleanup_planes(drm, state); | ||
| 153 | drm_atomic_state_put(state); | ||
| 154 | } | ||
| 155 | |||
| 156 | static void sti_atomic_work(struct work_struct *work) | ||
| 157 | { | ||
| 158 | struct sti_private *private = container_of(work, | ||
| 159 | struct sti_private, commit.work); | ||
| 160 | |||
| 161 | sti_atomic_complete(private, private->commit.state); | ||
| 162 | } | ||
| 163 | |||
| 164 | static int sti_atomic_check(struct drm_device *dev, | 120 | static int sti_atomic_check(struct drm_device *dev, |
| 165 | struct drm_atomic_state *state) | 121 | struct drm_atomic_state *state) |
| 166 | { | 122 | { |
| @@ -181,38 +137,6 @@ static int sti_atomic_check(struct drm_device *dev, | |||
| 181 | return ret; | 137 | return ret; |
| 182 | } | 138 | } |
| 183 | 139 | ||
| 184 | static int sti_atomic_commit(struct drm_device *drm, | ||
| 185 | struct drm_atomic_state *state, bool nonblock) | ||
| 186 | { | ||
| 187 | struct sti_private *private = drm->dev_private; | ||
| 188 | int err; | ||
| 189 | |||
| 190 | err = drm_atomic_helper_prepare_planes(drm, state); | ||
| 191 | if (err) | ||
| 192 | return err; | ||
| 193 | |||
| 194 | /* serialize outstanding nonblocking commits */ | ||
| 195 | mutex_lock(&private->commit.lock); | ||
| 196 | flush_work(&private->commit.work); | ||
| 197 | |||
| 198 | /* | ||
| 199 | * This is the point of no return - everything below never fails except | ||
| 200 | * when the hw goes bonghits. Which means we can commit the new state on | ||
| 201 | * the software side now. | ||
| 202 | */ | ||
| 203 | |||
| 204 | drm_atomic_helper_swap_state(state, true); | ||
| 205 | |||
| 206 | drm_atomic_state_get(state); | ||
| 207 | if (nonblock) | ||
| 208 | sti_atomic_schedule(private, state); | ||
| 209 | else | ||
| 210 | sti_atomic_complete(private, state); | ||
| 211 | |||
| 212 | mutex_unlock(&private->commit.lock); | ||
| 213 | return 0; | ||
| 214 | } | ||
| 215 | |||
| 216 | static void sti_output_poll_changed(struct drm_device *ddev) | 140 | static void sti_output_poll_changed(struct drm_device *ddev) |
| 217 | { | 141 | { |
| 218 | struct sti_private *private = ddev->dev_private; | 142 | struct sti_private *private = ddev->dev_private; |
| @@ -224,7 +148,7 @@ static const struct drm_mode_config_funcs sti_mode_config_funcs = { | |||
| 224 | .fb_create = drm_fb_cma_create, | 148 | .fb_create = drm_fb_cma_create, |
| 225 | .output_poll_changed = sti_output_poll_changed, | 149 | .output_poll_changed = sti_output_poll_changed, |
| 226 | .atomic_check = sti_atomic_check, | 150 | .atomic_check = sti_atomic_check, |
| 227 | .atomic_commit = sti_atomic_commit, | 151 | .atomic_commit = drm_atomic_helper_commit, |
| 228 | }; | 152 | }; |
| 229 | 153 | ||
| 230 | static void sti_mode_config_init(struct drm_device *dev) | 154 | static void sti_mode_config_init(struct drm_device *dev) |
| @@ -303,9 +227,6 @@ static int sti_init(struct drm_device *ddev) | |||
| 303 | dev_set_drvdata(ddev->dev, ddev); | 227 | dev_set_drvdata(ddev->dev, ddev); |
| 304 | private->drm_dev = ddev; | 228 | private->drm_dev = ddev; |
| 305 | 229 | ||
| 306 | mutex_init(&private->commit.lock); | ||
| 307 | INIT_WORK(&private->commit.work, sti_atomic_work); | ||
| 308 | |||
| 309 | drm_mode_config_init(ddev); | 230 | drm_mode_config_init(ddev); |
| 310 | 231 | ||
| 311 | sti_mode_config_init(ddev); | 232 | sti_mode_config_init(ddev); |
| @@ -326,6 +247,7 @@ static void sti_cleanup(struct drm_device *ddev) | |||
| 326 | 247 | ||
| 327 | drm_kms_helper_poll_fini(ddev); | 248 | drm_kms_helper_poll_fini(ddev); |
| 328 | drm_vblank_cleanup(ddev); | 249 | drm_vblank_cleanup(ddev); |
| 250 | component_unbind_all(ddev->dev, ddev); | ||
| 329 | kfree(private); | 251 | kfree(private); |
| 330 | ddev->dev_private = NULL; | 252 | ddev->dev_private = NULL; |
| 331 | } | 253 | } |
diff --git a/drivers/gpu/drm/sti/sti_drv.h b/drivers/gpu/drm/sti/sti_drv.h index 4c75845cc9ab..6502ed2d3351 100644 --- a/drivers/gpu/drm/sti/sti_drv.h +++ b/drivers/gpu/drm/sti/sti_drv.h | |||
| @@ -25,12 +25,6 @@ struct sti_private { | |||
| 25 | struct drm_property *plane_zorder_property; | 25 | struct drm_property *plane_zorder_property; |
| 26 | struct drm_device *drm_dev; | 26 | struct drm_device *drm_dev; |
| 27 | struct drm_fbdev_cma *fbdev; | 27 | struct drm_fbdev_cma *fbdev; |
| 28 | |||
| 29 | struct { | ||
| 30 | struct drm_atomic_state *state; | ||
| 31 | struct work_struct work; | ||
| 32 | struct mutex lock; | ||
| 33 | } commit; | ||
| 34 | }; | 28 | }; |
| 35 | 29 | ||
| 36 | extern struct platform_driver sti_tvout_driver; | 30 | extern struct platform_driver sti_tvout_driver; |
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c index 877d053d86f4..86279f5022c2 100644 --- a/drivers/gpu/drm/sti/sti_gdp.c +++ b/drivers/gpu/drm/sti/sti_gdp.c | |||
| @@ -610,7 +610,6 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane, | |||
| 610 | struct sti_plane *plane = to_sti_plane(drm_plane); | 610 | struct sti_plane *plane = to_sti_plane(drm_plane); |
| 611 | struct sti_gdp *gdp = to_sti_gdp(plane); | 611 | struct sti_gdp *gdp = to_sti_gdp(plane); |
| 612 | struct drm_crtc *crtc = state->crtc; | 612 | struct drm_crtc *crtc = state->crtc; |
| 613 | struct sti_compositor *compo = dev_get_drvdata(gdp->dev); | ||
| 614 | struct drm_framebuffer *fb = state->fb; | 613 | struct drm_framebuffer *fb = state->fb; |
| 615 | struct drm_crtc_state *crtc_state; | 614 | struct drm_crtc_state *crtc_state; |
| 616 | struct sti_mixer *mixer; | 615 | struct sti_mixer *mixer; |
| @@ -648,45 +647,30 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane, | |||
| 648 | return -EINVAL; | 647 | return -EINVAL; |
| 649 | } | 648 | } |
| 650 | 649 | ||
| 651 | if (!gdp->vtg) { | 650 | /* Set gdp clock */ |
| 652 | /* Register gdp callback */ | 651 | if (mode->clock && gdp->clk_pix) { |
| 653 | gdp->vtg = compo->vtg[mixer->id]; | 652 | struct clk *clkp; |
| 654 | if (sti_vtg_register_client(gdp->vtg, | 653 | int rate = mode->clock * 1000; |
| 655 | &gdp->vtg_field_nb, crtc)) { | 654 | int res; |
| 656 | DRM_ERROR("Cannot register VTG notifier\n"); | 655 | |
| 656 | /* | ||
| 657 | * According to the mixer used, the gdp pixel clock | ||
| 658 | * should have a different parent clock. | ||
| 659 | */ | ||
| 660 | if (mixer->id == STI_MIXER_MAIN) | ||
| 661 | clkp = gdp->clk_main_parent; | ||
| 662 | else | ||
| 663 | clkp = gdp->clk_aux_parent; | ||
| 664 | |||
| 665 | if (clkp) | ||
| 666 | clk_set_parent(gdp->clk_pix, clkp); | ||
| 667 | |||
| 668 | res = clk_set_rate(gdp->clk_pix, rate); | ||
| 669 | if (res < 0) { | ||
| 670 | DRM_ERROR("Cannot set rate (%dHz) for gdp\n", | ||
| 671 | rate); | ||
| 657 | return -EINVAL; | 672 | return -EINVAL; |
| 658 | } | 673 | } |
| 659 | |||
| 660 | /* Set and enable gdp clock */ | ||
| 661 | if (gdp->clk_pix) { | ||
| 662 | struct clk *clkp; | ||
| 663 | int rate = mode->clock * 1000; | ||
| 664 | int res; | ||
| 665 | |||
| 666 | /* | ||
| 667 | * According to the mixer used, the gdp pixel clock | ||
| 668 | * should have a different parent clock. | ||
| 669 | */ | ||
| 670 | if (mixer->id == STI_MIXER_MAIN) | ||
| 671 | clkp = gdp->clk_main_parent; | ||
| 672 | else | ||
| 673 | clkp = gdp->clk_aux_parent; | ||
| 674 | |||
| 675 | if (clkp) | ||
| 676 | clk_set_parent(gdp->clk_pix, clkp); | ||
| 677 | |||
| 678 | res = clk_set_rate(gdp->clk_pix, rate); | ||
| 679 | if (res < 0) { | ||
| 680 | DRM_ERROR("Cannot set rate (%dHz) for gdp\n", | ||
| 681 | rate); | ||
| 682 | return -EINVAL; | ||
| 683 | } | ||
| 684 | |||
| 685 | if (clk_prepare_enable(gdp->clk_pix)) { | ||
| 686 | DRM_ERROR("Failed to prepare/enable gdp\n"); | ||
| 687 | return -EINVAL; | ||
| 688 | } | ||
| 689 | } | ||
| 690 | } | 674 | } |
| 691 | 675 | ||
| 692 | DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n", | 676 | DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n", |
| @@ -724,6 +708,31 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane, | |||
| 724 | if (!crtc || !fb) | 708 | if (!crtc || !fb) |
| 725 | return; | 709 | return; |
| 726 | 710 | ||
| 711 | if ((oldstate->fb == state->fb) && | ||
| 712 | (oldstate->crtc_x == state->crtc_x) && | ||
| 713 | (oldstate->crtc_y == state->crtc_y) && | ||
| 714 | (oldstate->crtc_w == state->crtc_w) && | ||
| 715 | (oldstate->crtc_h == state->crtc_h) && | ||
| 716 | (oldstate->src_x == state->src_x) && | ||
| 717 | (oldstate->src_y == state->src_y) && | ||
| 718 | (oldstate->src_w == state->src_w) && | ||
| 719 | (oldstate->src_h == state->src_h)) { | ||
| 720 | /* No change since last update, do not post cmd */ | ||
| 721 | DRM_DEBUG_DRIVER("No change, not posting cmd\n"); | ||
| 722 | plane->status = STI_PLANE_UPDATED; | ||
| 723 | return; | ||
| 724 | } | ||
| 725 | |||
| 726 | if (!gdp->vtg) { | ||
| 727 | struct sti_compositor *compo = dev_get_drvdata(gdp->dev); | ||
| 728 | struct sti_mixer *mixer = to_sti_mixer(crtc); | ||
| 729 | |||
| 730 | /* Register gdp callback */ | ||
| 731 | gdp->vtg = compo->vtg[mixer->id]; | ||
| 732 | sti_vtg_register_client(gdp->vtg, &gdp->vtg_field_nb, crtc); | ||
| 733 | clk_prepare_enable(gdp->clk_pix); | ||
| 734 | } | ||
| 735 | |||
| 727 | mode = &crtc->mode; | 736 | mode = &crtc->mode; |
| 728 | dst_x = state->crtc_x; | 737 | dst_x = state->crtc_x; |
| 729 | dst_y = state->crtc_y; | 738 | dst_y = state->crtc_y; |
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index c9151849d604..ce2dcba679d5 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c | |||
| @@ -95,7 +95,6 @@ | |||
| 95 | #define HDMI_CFG_HDCP_EN BIT(2) | 95 | #define HDMI_CFG_HDCP_EN BIT(2) |
| 96 | #define HDMI_CFG_ESS_NOT_OESS BIT(3) | 96 | #define HDMI_CFG_ESS_NOT_OESS BIT(3) |
| 97 | #define HDMI_CFG_H_SYNC_POL_NEG BIT(4) | 97 | #define HDMI_CFG_H_SYNC_POL_NEG BIT(4) |
| 98 | #define HDMI_CFG_SINK_TERM_DET_EN BIT(5) | ||
| 99 | #define HDMI_CFG_V_SYNC_POL_NEG BIT(6) | 98 | #define HDMI_CFG_V_SYNC_POL_NEG BIT(6) |
| 100 | #define HDMI_CFG_422_EN BIT(8) | 99 | #define HDMI_CFG_422_EN BIT(8) |
| 101 | #define HDMI_CFG_FIFO_OVERRUN_CLR BIT(12) | 100 | #define HDMI_CFG_FIFO_OVERRUN_CLR BIT(12) |
| @@ -159,7 +158,6 @@ struct sti_hdmi_connector { | |||
| 159 | struct drm_encoder *encoder; | 158 | struct drm_encoder *encoder; |
| 160 | struct sti_hdmi *hdmi; | 159 | struct sti_hdmi *hdmi; |
| 161 | struct drm_property *colorspace_property; | 160 | struct drm_property *colorspace_property; |
| 162 | struct drm_property *hdmi_mode_property; | ||
| 163 | }; | 161 | }; |
| 164 | 162 | ||
| 165 | #define to_sti_hdmi_connector(x) \ | 163 | #define to_sti_hdmi_connector(x) \ |
| @@ -266,12 +264,9 @@ static void hdmi_config(struct sti_hdmi *hdmi) | |||
| 266 | 264 | ||
| 267 | /* Select encryption type and the framing mode */ | 265 | /* Select encryption type and the framing mode */ |
| 268 | conf |= HDMI_CFG_ESS_NOT_OESS; | 266 | conf |= HDMI_CFG_ESS_NOT_OESS; |
| 269 | if (hdmi->hdmi_mode == HDMI_MODE_HDMI) | 267 | if (hdmi->hdmi_monitor) |
| 270 | conf |= HDMI_CFG_HDMI_NOT_DVI; | 268 | conf |= HDMI_CFG_HDMI_NOT_DVI; |
| 271 | 269 | ||
| 272 | /* Enable sink term detection */ | ||
| 273 | conf |= HDMI_CFG_SINK_TERM_DET_EN; | ||
| 274 | |||
| 275 | /* Set Hsync polarity */ | 270 | /* Set Hsync polarity */ |
| 276 | if (hdmi->mode.flags & DRM_MODE_FLAG_NHSYNC) { | 271 | if (hdmi->mode.flags & DRM_MODE_FLAG_NHSYNC) { |
| 277 | DRM_DEBUG_DRIVER("H Sync Negative\n"); | 272 | DRM_DEBUG_DRIVER("H Sync Negative\n"); |
| @@ -607,9 +602,6 @@ static void hdmi_dbg_cfg(struct seq_file *s, int val) | |||
| 607 | tmp = val & HDMI_CFG_ESS_NOT_OESS; | 602 | tmp = val & HDMI_CFG_ESS_NOT_OESS; |
| 608 | DBGFS_PRINT_STR("HDCP mode:", tmp ? "ESS enable" : "OESS enable"); | 603 | DBGFS_PRINT_STR("HDCP mode:", tmp ? "ESS enable" : "OESS enable"); |
| 609 | seq_puts(s, "\t\t\t\t\t"); | 604 | seq_puts(s, "\t\t\t\t\t"); |
| 610 | tmp = val & HDMI_CFG_SINK_TERM_DET_EN; | ||
| 611 | DBGFS_PRINT_STR("Sink term detection:", tmp ? "enable" : "disable"); | ||
| 612 | seq_puts(s, "\t\t\t\t\t"); | ||
| 613 | tmp = val & HDMI_CFG_H_SYNC_POL_NEG; | 605 | tmp = val & HDMI_CFG_H_SYNC_POL_NEG; |
| 614 | DBGFS_PRINT_STR("Hsync polarity:", tmp ? "inverted" : "normal"); | 606 | DBGFS_PRINT_STR("Hsync polarity:", tmp ? "inverted" : "normal"); |
| 615 | seq_puts(s, "\t\t\t\t\t"); | 607 | seq_puts(s, "\t\t\t\t\t"); |
| @@ -977,6 +969,11 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector) | |||
| 977 | if (!edid) | 969 | if (!edid) |
| 978 | goto fail; | 970 | goto fail; |
| 979 | 971 | ||
| 972 | hdmi->hdmi_monitor = drm_detect_hdmi_monitor(edid); | ||
| 973 | DRM_DEBUG_KMS("%s : %dx%d cm\n", | ||
| 974 | (hdmi->hdmi_monitor ? "hdmi monitor" : "dvi monitor"), | ||
| 975 | edid->width_cm, edid->height_cm); | ||
| 976 | |||
| 980 | count = drm_add_edid_modes(connector, edid); | 977 | count = drm_add_edid_modes(connector, edid); |
| 981 | drm_mode_connector_update_edid_property(connector, edid); | 978 | drm_mode_connector_update_edid_property(connector, edid); |
| 982 | drm_edid_to_eld(connector, edid); | 979 | drm_edid_to_eld(connector, edid); |
| @@ -1060,19 +1057,6 @@ static void sti_hdmi_connector_init_property(struct drm_device *drm_dev, | |||
| 1060 | } | 1057 | } |
| 1061 | hdmi_connector->colorspace_property = prop; | 1058 | hdmi_connector->colorspace_property = prop; |
| 1062 | drm_object_attach_property(&connector->base, prop, hdmi->colorspace); | 1059 | drm_object_attach_property(&connector->base, prop, hdmi->colorspace); |
| 1063 | |||
| 1064 | /* hdmi_mode property */ | ||
| 1065 | hdmi->hdmi_mode = DEFAULT_HDMI_MODE; | ||
| 1066 | prop = drm_property_create_enum(drm_dev, 0, "hdmi_mode", | ||
| 1067 | hdmi_mode_names, | ||
| 1068 | ARRAY_SIZE(hdmi_mode_names)); | ||
| 1069 | if (!prop) { | ||
| 1070 | DRM_ERROR("fails to create colorspace property\n"); | ||
| 1071 | return; | ||
| 1072 | } | ||
| 1073 | hdmi_connector->hdmi_mode_property = prop; | ||
| 1074 | drm_object_attach_property(&connector->base, prop, hdmi->hdmi_mode); | ||
| 1075 | |||
| 1076 | } | 1060 | } |
| 1077 | 1061 | ||
| 1078 | static int | 1062 | static int |
| @@ -1090,11 +1074,6 @@ sti_hdmi_connector_set_property(struct drm_connector *connector, | |||
| 1090 | return 0; | 1074 | return 0; |
| 1091 | } | 1075 | } |
| 1092 | 1076 | ||
| 1093 | if (property == hdmi_connector->hdmi_mode_property) { | ||
| 1094 | hdmi->hdmi_mode = val; | ||
| 1095 | return 0; | ||
| 1096 | } | ||
| 1097 | |||
| 1098 | DRM_ERROR("failed to set hdmi connector property\n"); | 1077 | DRM_ERROR("failed to set hdmi connector property\n"); |
| 1099 | return -EINVAL; | 1078 | return -EINVAL; |
| 1100 | } | 1079 | } |
| @@ -1114,11 +1093,6 @@ sti_hdmi_connector_get_property(struct drm_connector *connector, | |||
| 1114 | return 0; | 1093 | return 0; |
| 1115 | } | 1094 | } |
| 1116 | 1095 | ||
| 1117 | if (property == hdmi_connector->hdmi_mode_property) { | ||
| 1118 | *val = hdmi->hdmi_mode; | ||
| 1119 | return 0; | ||
| 1120 | } | ||
| 1121 | |||
| 1122 | DRM_ERROR("failed to get hdmi connector property\n"); | 1096 | DRM_ERROR("failed to get hdmi connector property\n"); |
| 1123 | return -EINVAL; | 1097 | return -EINVAL; |
| 1124 | } | 1098 | } |
diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h index 119bc3582ac7..407012350f1a 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.h +++ b/drivers/gpu/drm/sti/sti_hdmi.h | |||
| @@ -30,19 +30,6 @@ struct hdmi_audio_params { | |||
| 30 | struct hdmi_audio_infoframe cea; | 30 | struct hdmi_audio_infoframe cea; |
| 31 | }; | 31 | }; |
| 32 | 32 | ||
| 33 | /* values for the framing mode property */ | ||
| 34 | enum sti_hdmi_modes { | ||
| 35 | HDMI_MODE_HDMI, | ||
| 36 | HDMI_MODE_DVI, | ||
| 37 | }; | ||
| 38 | |||
| 39 | static const struct drm_prop_enum_list hdmi_mode_names[] = { | ||
| 40 | { HDMI_MODE_HDMI, "hdmi" }, | ||
| 41 | { HDMI_MODE_DVI, "dvi" }, | ||
| 42 | }; | ||
| 43 | |||
| 44 | #define DEFAULT_HDMI_MODE HDMI_MODE_HDMI | ||
| 45 | |||
| 46 | static const struct drm_prop_enum_list colorspace_mode_names[] = { | 33 | static const struct drm_prop_enum_list colorspace_mode_names[] = { |
| 47 | { HDMI_COLORSPACE_RGB, "rgb" }, | 34 | { HDMI_COLORSPACE_RGB, "rgb" }, |
| 48 | { HDMI_COLORSPACE_YUV422, "yuv422" }, | 35 | { HDMI_COLORSPACE_YUV422, "yuv422" }, |
| @@ -73,7 +60,7 @@ static const struct drm_prop_enum_list colorspace_mode_names[] = { | |||
| 73 | * @reset: reset control of the hdmi phy | 60 | * @reset: reset control of the hdmi phy |
| 74 | * @ddc_adapt: i2c ddc adapter | 61 | * @ddc_adapt: i2c ddc adapter |
| 75 | * @colorspace: current colorspace selected | 62 | * @colorspace: current colorspace selected |
| 76 | * @hdmi_mode: select framing for HDMI or DVI | 63 | * @hdmi_monitor: true if an HDMI monitor is detected, else a DVI monitor is assumed |
| 77 | * @audio_pdev: ASoC hdmi-codec platform device | 64 | * @audio_pdev: ASoC hdmi-codec platform device |
| 78 | * @audio: hdmi audio parameters. | 65 | * @audio: hdmi audio parameters. |
| 79 | * @drm_connector: hdmi connector | 66 | * @drm_connector: hdmi connector |
| @@ -98,7 +85,7 @@ struct sti_hdmi { | |||
| 98 | struct reset_control *reset; | 85 | struct reset_control *reset; |
| 99 | struct i2c_adapter *ddc_adapt; | 86 | struct i2c_adapter *ddc_adapt; |
| 100 | enum hdmi_colorspace colorspace; | 87 | enum hdmi_colorspace colorspace; |
| 101 | enum sti_hdmi_modes hdmi_mode; | 88 | bool hdmi_monitor; |
| 102 | struct platform_device *audio_pdev; | 89 | struct platform_device *audio_pdev; |
| 103 | struct hdmi_audio_params audio; | 90 | struct hdmi_audio_params audio; |
| 104 | struct drm_connector *drm_connector; | 91 | struct drm_connector *drm_connector; |
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index 4376fd8a8e52..66f843148ef7 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c | |||
| @@ -1037,9 +1037,9 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane, | |||
| 1037 | src_w = state->src_w >> 16; | 1037 | src_w = state->src_w >> 16; |
| 1038 | src_h = state->src_h >> 16; | 1038 | src_h = state->src_h >> 16; |
| 1039 | 1039 | ||
| 1040 | if (!sti_hqvdp_check_hw_scaling(hqvdp, mode, | 1040 | if (mode->clock && !sti_hqvdp_check_hw_scaling(hqvdp, mode, |
| 1041 | src_w, src_h, | 1041 | src_w, src_h, |
| 1042 | dst_w, dst_h)) { | 1042 | dst_w, dst_h)) { |
| 1043 | DRM_ERROR("Scaling beyond HW capabilities\n"); | 1043 | DRM_ERROR("Scaling beyond HW capabilities\n"); |
| 1044 | return -EINVAL; | 1044 | return -EINVAL; |
| 1045 | } | 1045 | } |
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h index 830a3c42d886..e64a00e61049 100644 --- a/drivers/gpu/drm/sti/sti_mixer.h +++ b/drivers/gpu/drm/sti/sti_mixer.h | |||
| @@ -28,7 +28,6 @@ enum sti_mixer_status { | |||
| 28 | * @regs: mixer registers | 28 | * @regs: mixer registers |
| 29 | * @id: id of the mixer | 29 | * @id: id of the mixer |
| 30 | * @drm_crtc: crtc object link to the mixer | 30 | * @drm_crtc: crtc object link to the mixer |
| 31 | * @pending_event: set if a flip event is pending on crtc | ||
| 32 | * @status: to know the status of the mixer | 31 | * @status: to know the status of the mixer |
| 33 | */ | 32 | */ |
| 34 | struct sti_mixer { | 33 | struct sti_mixer { |
| @@ -36,7 +35,6 @@ struct sti_mixer { | |||
| 36 | void __iomem *regs; | 35 | void __iomem *regs; |
| 37 | int id; | 36 | int id; |
| 38 | struct drm_crtc drm_crtc; | 37 | struct drm_crtc drm_crtc; |
| 39 | struct drm_pending_vblank_event *pending_event; | ||
| 40 | enum sti_mixer_status status; | 38 | enum sti_mixer_status status; |
| 41 | }; | 39 | }; |
| 42 | 40 | ||
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c index c3d9c8ae14af..2dcba1d3a122 100644 --- a/drivers/gpu/drm/sti/sti_vtg.c +++ b/drivers/gpu/drm/sti/sti_vtg.c | |||
| @@ -17,7 +17,6 @@ | |||
| 17 | #include "sti_vtg.h" | 17 | #include "sti_vtg.h" |
| 18 | 18 | ||
| 19 | #define VTG_MODE_MASTER 0 | 19 | #define VTG_MODE_MASTER 0 |
| 20 | #define VTG_MODE_SLAVE_BY_EXT0 1 | ||
| 21 | 20 | ||
| 22 | /* registers offset */ | 21 | /* registers offset */ |
| 23 | #define VTG_MODE 0x0000 | 22 | #define VTG_MODE 0x0000 |
| @@ -132,7 +131,6 @@ struct sti_vtg_sync_params { | |||
| 132 | * @irq_status: store the IRQ status value | 131 | * @irq_status: store the IRQ status value |
| 133 | * @notifier_list: notifier callback | 132 | * @notifier_list: notifier callback |
| 134 | * @crtc: the CRTC for vblank event | 133 | * @crtc: the CRTC for vblank event |
| 135 | * @slave: slave vtg | ||
| 136 | * @link: List node to link the structure in lookup list | 134 | * @link: List node to link the structure in lookup list |
| 137 | */ | 135 | */ |
| 138 | struct sti_vtg { | 136 | struct sti_vtg { |
| @@ -144,7 +142,6 @@ struct sti_vtg { | |||
| 144 | u32 irq_status; | 142 | u32 irq_status; |
| 145 | struct raw_notifier_head notifier_list; | 143 | struct raw_notifier_head notifier_list; |
| 146 | struct drm_crtc *crtc; | 144 | struct drm_crtc *crtc; |
| 147 | struct sti_vtg *slave; | ||
| 148 | struct list_head link; | 145 | struct list_head link; |
| 149 | }; | 146 | }; |
| 150 | 147 | ||
| @@ -166,10 +163,6 @@ struct sti_vtg *of_vtg_find(struct device_node *np) | |||
| 166 | 163 | ||
| 167 | static void vtg_reset(struct sti_vtg *vtg) | 164 | static void vtg_reset(struct sti_vtg *vtg) |
| 168 | { | 165 | { |
| 169 | /* reset slave and then master */ | ||
| 170 | if (vtg->slave) | ||
| 171 | vtg_reset(vtg->slave); | ||
| 172 | |||
| 173 | writel(1, vtg->regs + VTG_DRST_AUTOC); | 166 | writel(1, vtg->regs + VTG_DRST_AUTOC); |
| 174 | } | 167 | } |
| 175 | 168 | ||
| @@ -259,10 +252,6 @@ static void vtg_set_mode(struct sti_vtg *vtg, | |||
| 259 | { | 252 | { |
| 260 | unsigned int i; | 253 | unsigned int i; |
| 261 | 254 | ||
| 262 | if (vtg->slave) | ||
| 263 | vtg_set_mode(vtg->slave, VTG_MODE_SLAVE_BY_EXT0, | ||
| 264 | vtg->sync_params, mode); | ||
| 265 | |||
| 266 | /* Set the number of clock cycles per line */ | 255 | /* Set the number of clock cycles per line */ |
| 267 | writel(mode->htotal, vtg->regs + VTG_CLKLN); | 256 | writel(mode->htotal, vtg->regs + VTG_CLKLN); |
| 268 | 257 | ||
| @@ -318,11 +307,7 @@ void sti_vtg_set_config(struct sti_vtg *vtg, | |||
| 318 | 307 | ||
| 319 | vtg_reset(vtg); | 308 | vtg_reset(vtg); |
| 320 | 309 | ||
| 321 | /* enable irq for the vtg vblank synchro */ | 310 | vtg_enable_irq(vtg); |
| 322 | if (vtg->slave) | ||
| 323 | vtg_enable_irq(vtg->slave); | ||
| 324 | else | ||
| 325 | vtg_enable_irq(vtg); | ||
| 326 | } | 311 | } |
| 327 | 312 | ||
| 328 | /** | 313 | /** |
| @@ -365,18 +350,12 @@ u32 sti_vtg_get_pixel_number(struct drm_display_mode mode, int x) | |||
| 365 | int sti_vtg_register_client(struct sti_vtg *vtg, struct notifier_block *nb, | 350 | int sti_vtg_register_client(struct sti_vtg *vtg, struct notifier_block *nb, |
| 366 | struct drm_crtc *crtc) | 351 | struct drm_crtc *crtc) |
| 367 | { | 352 | { |
| 368 | if (vtg->slave) | ||
| 369 | return sti_vtg_register_client(vtg->slave, nb, crtc); | ||
| 370 | |||
| 371 | vtg->crtc = crtc; | 353 | vtg->crtc = crtc; |
| 372 | return raw_notifier_chain_register(&vtg->notifier_list, nb); | 354 | return raw_notifier_chain_register(&vtg->notifier_list, nb); |
| 373 | } | 355 | } |
| 374 | 356 | ||
| 375 | int sti_vtg_unregister_client(struct sti_vtg *vtg, struct notifier_block *nb) | 357 | int sti_vtg_unregister_client(struct sti_vtg *vtg, struct notifier_block *nb) |
| 376 | { | 358 | { |
| 377 | if (vtg->slave) | ||
| 378 | return sti_vtg_unregister_client(vtg->slave, nb); | ||
| 379 | |||
| 380 | return raw_notifier_chain_unregister(&vtg->notifier_list, nb); | 359 | return raw_notifier_chain_unregister(&vtg->notifier_list, nb); |
| 381 | } | 360 | } |
| 382 | 361 | ||
| @@ -410,7 +389,6 @@ static irqreturn_t vtg_irq(int irq, void *arg) | |||
| 410 | static int vtg_probe(struct platform_device *pdev) | 389 | static int vtg_probe(struct platform_device *pdev) |
| 411 | { | 390 | { |
| 412 | struct device *dev = &pdev->dev; | 391 | struct device *dev = &pdev->dev; |
| 413 | struct device_node *np; | ||
| 414 | struct sti_vtg *vtg; | 392 | struct sti_vtg *vtg; |
| 415 | struct resource *res; | 393 | struct resource *res; |
| 416 | int ret; | 394 | int ret; |
| @@ -434,29 +412,20 @@ static int vtg_probe(struct platform_device *pdev) | |||
| 434 | return -ENOMEM; | 412 | return -ENOMEM; |
| 435 | } | 413 | } |
| 436 | 414 | ||
| 437 | np = of_parse_phandle(pdev->dev.of_node, "st,slave", 0); | 415 | vtg->irq = platform_get_irq(pdev, 0); |
| 438 | if (np) { | 416 | if (vtg->irq < 0) { |
| 439 | vtg->slave = of_vtg_find(np); | 417 | DRM_ERROR("Failed to get VTG interrupt\n"); |
| 440 | of_node_put(np); | 418 | return vtg->irq; |
| 419 | } | ||
| 420 | |||
| 421 | RAW_INIT_NOTIFIER_HEAD(&vtg->notifier_list); | ||
| 441 | 422 | ||
| 442 | if (!vtg->slave) | 423 | ret = devm_request_threaded_irq(dev, vtg->irq, vtg_irq, |
| 443 | return -EPROBE_DEFER; | 424 | vtg_irq_thread, IRQF_ONESHOT, |
| 444 | } else { | 425 | dev_name(dev), vtg); |
| 445 | vtg->irq = platform_get_irq(pdev, 0); | 426 | if (ret < 0) { |
| 446 | if (vtg->irq < 0) { | 427 | DRM_ERROR("Failed to register VTG interrupt\n"); |
| 447 | DRM_ERROR("Failed to get VTG interrupt\n"); | 428 | return ret; |
| 448 | return vtg->irq; | ||
| 449 | } | ||
| 450 | |||
| 451 | RAW_INIT_NOTIFIER_HEAD(&vtg->notifier_list); | ||
| 452 | |||
| 453 | ret = devm_request_threaded_irq(dev, vtg->irq, vtg_irq, | ||
| 454 | vtg_irq_thread, IRQF_ONESHOT, | ||
| 455 | dev_name(dev), vtg); | ||
| 456 | if (ret < 0) { | ||
| 457 | DRM_ERROR("Failed to register VTG interrupt\n"); | ||
| 458 | return ret; | ||
| 459 | } | ||
| 460 | } | 429 | } |
| 461 | 430 | ||
| 462 | vtg_register(vtg); | 431 | vtg_register(vtg); |
diff --git a/drivers/gpu/drm/tinydrm/Kconfig b/drivers/gpu/drm/tinydrm/Kconfig new file mode 100644 index 000000000000..3504c53846da --- /dev/null +++ b/drivers/gpu/drm/tinydrm/Kconfig | |||
| @@ -0,0 +1,21 @@ | |||
| 1 | menuconfig DRM_TINYDRM | ||
| 2 | tristate "Support for simple displays" | ||
| 3 | depends on DRM | ||
| 4 | select DRM_KMS_HELPER | ||
| 5 | select DRM_KMS_CMA_HELPER | ||
| 6 | select BACKLIGHT_LCD_SUPPORT | ||
| 7 | select BACKLIGHT_CLASS_DEVICE | ||
| 8 | help | ||
| 9 | Choose this option if you have a tinydrm-supported display. | ||
| 10 | If M is selected the module will be called tinydrm. | ||
| 11 | |||
| 12 | config TINYDRM_MIPI_DBI | ||
| 13 | tristate | ||
| 14 | |||
| 15 | config TINYDRM_MI0283QT | ||
| 16 | tristate "DRM support for MI0283QT" | ||
| 17 | depends on DRM_TINYDRM && SPI | ||
| 18 | select TINYDRM_MIPI_DBI | ||
| 19 | help | ||
| 20 | DRM driver for the Multi-Inno MI0283QT display panel. | ||
| 21 | If M is selected the module will be called mi0283qt. | ||
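As a usage note (illustrative, not part of the patch), a .config fragment that builds the new drivers as modules would look like this; TINYDRM_MIPI_DBI does not need to be set by hand since MI0283QT selects it:

```
CONFIG_DRM_TINYDRM=m
CONFIG_TINYDRM_MI0283QT=m
```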
diff --git a/drivers/gpu/drm/tinydrm/Makefile b/drivers/gpu/drm/tinydrm/Makefile new file mode 100644 index 000000000000..7a3604cf4fc2 --- /dev/null +++ b/drivers/gpu/drm/tinydrm/Makefile | |||
| @@ -0,0 +1,7 @@ | |||
| 1 | obj-$(CONFIG_DRM_TINYDRM) += core/ | ||
| 2 | |||
| 3 | # Controllers | ||
| 4 | obj-$(CONFIG_TINYDRM_MIPI_DBI) += mipi-dbi.o | ||
| 5 | |||
| 6 | # Displays | ||
| 7 | obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o | ||
diff --git a/drivers/gpu/drm/tinydrm/core/Makefile b/drivers/gpu/drm/tinydrm/core/Makefile new file mode 100644 index 000000000000..fb221e6f8885 --- /dev/null +++ b/drivers/gpu/drm/tinydrm/core/Makefile | |||
| @@ -0,0 +1,3 @@ | |||
| 1 | tinydrm-y := tinydrm-core.o tinydrm-pipe.o tinydrm-helpers.o | ||
| 2 | |||
| 3 | obj-$(CONFIG_DRM_TINYDRM) += tinydrm.o | ||
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c new file mode 100644 index 000000000000..6a257dd08ee0 --- /dev/null +++ b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c | |||
| @@ -0,0 +1,376 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2016 Noralf Trønnes | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License as published by | ||
| 6 | * the Free Software Foundation; either version 2 of the License, or | ||
| 7 | * (at your option) any later version. | ||
| 8 | */ | ||
| 9 | |||
| 10 | #include <drm/drm_atomic.h> | ||
| 11 | #include <drm/drm_atomic_helper.h> | ||
| 12 | #include <drm/drm_crtc_helper.h> | ||
| 13 | #include <drm/tinydrm/tinydrm.h> | ||
| 14 | #include <linux/device.h> | ||
| 15 | #include <linux/dma-buf.h> | ||
| 16 | |||
| 17 | /** | ||
| 18 | * DOC: overview | ||
| 19 | * | ||
| 20 | * This library provides driver helpers for very simple display hardware. | ||
| 21 | * | ||
| 22 | * It is based on &drm_simple_display_pipe coupled with a &drm_connector which | ||
| 23 | * has only one fixed &drm_display_mode. The framebuffers are backed by the | ||
| 24 | * cma helper and have support for framebuffer flushing (dirty). | ||
| 25 | * fbdev support is also included. | ||
| 26 | * | ||
| 27 | */ | ||
| 28 | |||
| 29 | /** | ||
| 30 | * DOC: core | ||
| 31 | * | ||
| 32 | * The driver allocates &tinydrm_device, initializes it using | ||
| 33 | * devm_tinydrm_init(), sets up the pipeline using tinydrm_display_pipe_init() | ||
| 34 | * and registers the DRM device using devm_tinydrm_register(). | ||
| 35 | */ | ||
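As a rough illustration of the init sequence this DOC comment describes, here is a hypothetical SPI panel driver probe; all mydrm_* names are invented, and the tinydrm_display_pipe_init() argument list is assumed from tinydrm.h rather than shown in this diff:

```c
static int mydrm_probe(struct spi_device *spi)
{
	struct tinydrm_device *tdev;
	int ret;

	tdev = devm_kzalloc(&spi->dev, sizeof(*tdev), GFP_KERNEL);
	if (!tdev)
		return -ENOMEM;

	/* allocate and init the DRM device, freed automatically via devres */
	ret = devm_tinydrm_init(&spi->dev, tdev, &mydrm_fb_funcs,
				&mydrm_driver);
	if (ret)
		return ret;

	/* one simple pipe with a fixed mode behind a virtual connector */
	ret = tinydrm_display_pipe_init(tdev, &mydrm_pipe_funcs,
					DRM_MODE_CONNECTOR_VIRTUAL,
					mydrm_formats,
					ARRAY_SIZE(mydrm_formats),
					&mydrm_mode, 0);
	if (ret)
		return ret;

	spi_set_drvdata(spi, tdev);

	return devm_tinydrm_register(tdev);
}
```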
| 36 | |||
| 37 | /** | ||
| 38 | * tinydrm_lastclose - DRM lastclose helper | ||
| 39 | * @drm: DRM device | ||
| 40 | * | ||
| 41 | * This function ensures that fbdev is restored when drm_lastclose() is called | ||
| 42 | * on the last drm_release(). Drivers can use this as their | ||
| 43 | * &drm_driver->lastclose callback. | ||
| 44 | */ | ||
| 45 | void tinydrm_lastclose(struct drm_device *drm) | ||
| 46 | { | ||
| 47 | struct tinydrm_device *tdev = drm->dev_private; | ||
| 48 | |||
| 49 | DRM_DEBUG_KMS("\n"); | ||
| 50 | drm_fbdev_cma_restore_mode(tdev->fbdev_cma); | ||
| 51 | } | ||
| 52 | EXPORT_SYMBOL(tinydrm_lastclose); | ||
| 53 | |||
| 54 | /** | ||
| 55 | * tinydrm_gem_cma_prime_import_sg_table - Produce a CMA GEM object from | ||
| 56 | * another driver's scatter/gather table of pinned pages | ||
| 57 | * @drm: DRM device to import into | ||
| 58 | * @attach: DMA-BUF attachment | ||
| 59 | * @sgt: Scatter/gather table of pinned pages | ||
| 60 | * | ||
| 61 | * This function imports a scatter/gather table exported via DMA-BUF by | ||
| 62 | * another driver using drm_gem_cma_prime_import_sg_table(). It sets the | ||
| 63 | * kernel virtual address on the CMA object. Drivers should use this as their | ||
| 64 | * &drm_driver->gem_prime_import_sg_table callback if they need the virtual | ||
| 65 | * address. tinydrm_gem_cma_free_object() should be used in combination with | ||
| 66 | * this function. | ||
| 67 | * | ||
| 68 | * Returns: | ||
| 69 | * A pointer to a newly created GEM object or an ERR_PTR-encoded negative | ||
| 70 | * error code on failure. | ||
| 71 | */ | ||
| 72 | struct drm_gem_object * | ||
| 73 | tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm, | ||
| 74 | struct dma_buf_attachment *attach, | ||
| 75 | struct sg_table *sgt) | ||
| 76 | { | ||
| 77 | struct drm_gem_cma_object *cma_obj; | ||
| 78 | struct drm_gem_object *obj; | ||
| 79 | void *vaddr; | ||
| 80 | |||
| 81 | vaddr = dma_buf_vmap(attach->dmabuf); | ||
| 82 | if (!vaddr) { | ||
| 83 | DRM_ERROR("Failed to vmap PRIME buffer\n"); | ||
| 84 | return ERR_PTR(-ENOMEM); | ||
| 85 | } | ||
| 86 | |||
| 87 | obj = drm_gem_cma_prime_import_sg_table(drm, attach, sgt); | ||
| 88 | if (IS_ERR(obj)) { | ||
| 89 | dma_buf_vunmap(attach->dmabuf, vaddr); | ||
| 90 | return obj; | ||
| 91 | } | ||
| 92 | |||
| 93 | cma_obj = to_drm_gem_cma_obj(obj); | ||
| 94 | cma_obj->vaddr = vaddr; | ||
| 95 | |||
| 96 | return obj; | ||
| 97 | } | ||
| 98 | EXPORT_SYMBOL(tinydrm_gem_cma_prime_import_sg_table); | ||
| 99 | |||
| 100 | /** | ||
| 101 | * tinydrm_gem_cma_free_object - Free resources associated with a CMA GEM | ||
| 102 | * object | ||
| 103 | * @gem_obj: GEM object to free | ||
| 104 | * | ||
| 105 | * This function frees the backing memory of the CMA GEM object, cleans up the | ||
| 106 | * GEM object state and frees the memory used to store the object itself using | ||
| 107 | * drm_gem_cma_free_object(). It also handles PRIME buffers which have the kernel | ||
| 108 | * virtual address set by tinydrm_gem_cma_prime_import_sg_table(). Drivers | ||
| 109 | * can use this as their &drm_driver->gem_free_object callback. | ||
| 110 | */ | ||
| 111 | void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj) | ||
| 112 | { | ||
| 113 | if (gem_obj->import_attach) { | ||
| 114 | struct drm_gem_cma_object *cma_obj; | ||
| 115 | |||
| 116 | cma_obj = to_drm_gem_cma_obj(gem_obj); | ||
| 117 | dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr); | ||
| 118 | cma_obj->vaddr = NULL; | ||
| 119 | } | ||
| 120 | |||
| 121 | drm_gem_cma_free_object(gem_obj); | ||
| 122 | } | ||
| 123 | EXPORT_SYMBOL_GPL(tinydrm_gem_cma_free_object); | ||
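A sketch of how a driver might plug the two helpers above into its &drm_driver; mydrm_driver is a made-up name and the callback list is trimmed to the ones these helpers pair with:

```c
static struct drm_driver mydrm_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET |
				  DRIVER_PRIME | DRIVER_ATOMIC,
	.fops			= &tinydrm_fops,
	.lastclose		= tinydrm_lastclose,
	/* frees both native CMA objects and imported PRIME buffers */
	.gem_free_object	= tinydrm_gem_cma_free_object,
	/* import keeps a vmap of the buffer for flushing to the device */
	.gem_prime_import_sg_table = tinydrm_gem_cma_prime_import_sg_table,
	.gem_prime_get_sg_table	= drm_gem_cma_prime_get_sg_table,
	.dumb_create		= drm_gem_cma_dumb_create,
	.name			= "mydrm",
};
```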
| 124 | |||
| 125 | const struct file_operations tinydrm_fops = { | ||
| 126 | .owner = THIS_MODULE, | ||
| 127 | .open = drm_open, | ||
| 128 | .release = drm_release, | ||
| 129 | .unlocked_ioctl = drm_ioctl, | ||
| 130 | #ifdef CONFIG_COMPAT | ||
| 131 | .compat_ioctl = drm_compat_ioctl, | ||
| 132 | #endif | ||
| 133 | .poll = drm_poll, | ||
| 134 | .read = drm_read, | ||
| 135 | .llseek = no_llseek, | ||
| 136 | .mmap = drm_gem_cma_mmap, | ||
| 137 | }; | ||
| 138 | EXPORT_SYMBOL(tinydrm_fops); | ||
| 139 | |||
| 140 | static struct drm_framebuffer * | ||
| 141 | tinydrm_fb_create(struct drm_device *drm, struct drm_file *file_priv, | ||
| 142 | const struct drm_mode_fb_cmd2 *mode_cmd) | ||
| 143 | { | ||
| 144 | struct tinydrm_device *tdev = drm->dev_private; | ||
| 145 | |||
| 146 | return drm_fb_cma_create_with_funcs(drm, file_priv, mode_cmd, | ||
| 147 | tdev->fb_funcs); | ||
| 148 | } | ||
| 149 | |||
| 150 | static const struct drm_mode_config_funcs tinydrm_mode_config_funcs = { | ||
| 151 | .fb_create = tinydrm_fb_create, | ||
| 152 | .atomic_check = drm_atomic_helper_check, | ||
| 153 | .atomic_commit = drm_atomic_helper_commit, | ||
| 154 | }; | ||
| 155 | |||
| 156 | static int tinydrm_init(struct device *parent, struct tinydrm_device *tdev, | ||
| 157 | const struct drm_framebuffer_funcs *fb_funcs, | ||
| 158 | struct drm_driver *driver) | ||
| 159 | { | ||
| 160 | struct drm_device *drm; | ||
| 161 | |||
| 162 | mutex_init(&tdev->dirty_lock); | ||
| 163 | tdev->fb_funcs = fb_funcs; | ||
| 164 | |||
| 165 | /* | ||
| 166 | * We don't embed drm_device, because that would prevent us from using | ||
| 167 | * devm_kzalloc() to allocate tinydrm_device in the driver since | ||
| 168 | * drm_dev_unref() frees the structure. The devm_ functions provide | ||
| 169 | * for easy error handling. | ||
| 170 | */ | ||
| 171 | drm = drm_dev_alloc(driver, parent); | ||
| 172 | if (IS_ERR(drm)) | ||
| 173 | return PTR_ERR(drm); | ||
| 174 | |||
| 175 | tdev->drm = drm; | ||
| 176 | drm->dev_private = tdev; | ||
| 177 | drm_mode_config_init(drm); | ||
| 178 | drm->mode_config.funcs = &tinydrm_mode_config_funcs; | ||
| 179 | |||
| 180 | return 0; | ||
| 181 | } | ||
| 182 | |||
| 183 | static void tinydrm_fini(struct tinydrm_device *tdev) | ||
| 184 | { | ||
| 185 | drm_mode_config_cleanup(tdev->drm); | ||
| 186 | mutex_destroy(&tdev->dirty_lock); | ||
| 187 | tdev->drm->dev_private = NULL; | ||
| 188 | drm_dev_unref(tdev->drm); | ||
| 189 | } | ||
| 190 | |||
| 191 | static void devm_tinydrm_release(void *data) | ||
| 192 | { | ||
| 193 | tinydrm_fini(data); | ||
| 194 | } | ||
| 195 | |||
| 196 | /** | ||
| 197 | * devm_tinydrm_init - Initialize tinydrm device | ||
| 198 | * @parent: Parent device object | ||
| 199 | * @tdev: tinydrm device | ||
| 200 | * @fb_funcs: Framebuffer functions | ||
| 201 | * @driver: DRM driver | ||
| 202 | * | ||
| 203 | * This function initializes @tdev, the underlying DRM device and its | ||
| 204 | * mode_config. Resources will be automatically freed on driver detach (devres) | ||
| 205 | * using drm_mode_config_cleanup() and drm_dev_unref(). | ||
| 206 | * | ||
| 207 | * Returns: | ||
| 208 | * Zero on success, negative error code on failure. | ||
| 209 | */ | ||
| 210 | int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev, | ||
| 211 | const struct drm_framebuffer_funcs *fb_funcs, | ||
| 212 | struct drm_driver *driver) | ||
| 213 | { | ||
| 214 | int ret; | ||
| 215 | |||
| 216 | ret = tinydrm_init(parent, tdev, fb_funcs, driver); | ||
| 217 | if (ret) | ||
| 218 | return ret; | ||
| 219 | |||
| 220 | ret = devm_add_action(parent, devm_tinydrm_release, tdev); | ||
| 221 | if (ret) | ||
| 222 | tinydrm_fini(tdev); | ||
| 223 | |||
| 224 | return ret; | ||
| 225 | } | ||
| 226 | EXPORT_SYMBOL(devm_tinydrm_init); | ||
| 227 | |||
| 228 | static int tinydrm_register(struct tinydrm_device *tdev) | ||
| 229 | { | ||
| 230 | struct drm_device *drm = tdev->drm; | ||
| 231 | int bpp = drm->mode_config.preferred_depth; | ||
| 232 | struct drm_fbdev_cma *fbdev; | ||
| 233 | int ret; | ||
| 234 | |||
| 235 | ret = drm_dev_register(tdev->drm, 0); | ||
| 236 | if (ret) | ||
| 237 | return ret; | ||
| 238 | |||
| 239 | fbdev = drm_fbdev_cma_init_with_funcs(drm, bpp ? bpp : 32, | ||
| 240 | drm->mode_config.num_connector, | ||
| 241 | tdev->fb_funcs); | ||
| 242 | if (IS_ERR(fbdev)) | ||
| 243 | DRM_ERROR("Failed to initialize fbdev: %ld\n", PTR_ERR(fbdev)); | ||
| 244 | else | ||
| 245 | tdev->fbdev_cma = fbdev; | ||
| 246 | |||
| 247 | return 0; | ||
| 248 | } | ||
| 249 | |||
| 250 | static void tinydrm_unregister(struct tinydrm_device *tdev) | ||
| 251 | { | ||
| 252 | struct drm_fbdev_cma *fbdev_cma = tdev->fbdev_cma; | ||
| 253 | |||
| 254 | drm_crtc_force_disable_all(tdev->drm); | ||
| 255 | /* don't restore fbdev in lastclose, keep pipeline disabled */ | ||
| 256 | tdev->fbdev_cma = NULL; | ||
| 257 | drm_dev_unregister(tdev->drm); | ||
| 258 | if (fbdev_cma) | ||
| 259 | drm_fbdev_cma_fini(fbdev_cma); | ||
| 260 | } | ||
| 261 | |||
| 262 | static void devm_tinydrm_register_release(void *data) | ||
| 263 | { | ||
| 264 | tinydrm_unregister(data); | ||
| 265 | } | ||
| 266 | |||
| 267 | /** | ||
| 268 | * devm_tinydrm_register - Register tinydrm device | ||
| 269 | * @tdev: tinydrm device | ||
| 270 | * | ||
| 271 | * This function registers the underlying DRM device and fbdev. | ||
| 272 | * These resources will be automatically unregistered on driver detach (devres) | ||
| 273 | * and the display pipeline will be disabled. | ||
| 274 | * | ||
| 275 | * Returns: | ||
| 276 | * Zero on success, negative error code on failure. | ||
| 277 | */ | ||
| 278 | int devm_tinydrm_register(struct tinydrm_device *tdev) | ||
| 279 | { | ||
| 280 | struct device *dev = tdev->drm->dev; | ||
| 281 | int ret; | ||
| 282 | |||
| 283 | ret = tinydrm_register(tdev); | ||
| 284 | if (ret) | ||
| 285 | return ret; | ||
| 286 | |||
| 287 | ret = devm_add_action(dev, devm_tinydrm_register_release, tdev); | ||
| 288 | if (ret) | ||
| 289 | tinydrm_unregister(tdev); | ||
| 290 | |||
| 291 | return ret; | ||
| 292 | } | ||
| 293 | EXPORT_SYMBOL(devm_tinydrm_register); | ||
| 294 | |||
| 295 | /** | ||
| 296 | * tinydrm_shutdown - Shutdown tinydrm | ||
| 297 | * @tdev: tinydrm device | ||
| 298 | * | ||
| 299 | * This function makes sure that the display pipeline is disabled. | ||
| 300 | * Used by drivers in their shutdown callback to turn off the display | ||
| 301 | * on machine shutdown and reboot. | ||
| 302 | */ | ||
| 303 | void tinydrm_shutdown(struct tinydrm_device *tdev) | ||
| 304 | { | ||
| 305 | drm_crtc_force_disable_all(tdev->drm); | ||
| 306 | } | ||
| 307 | EXPORT_SYMBOL(tinydrm_shutdown); | ||
| 308 | |||
| 309 | /** | ||
| 310 | * tinydrm_suspend - Suspend tinydrm | ||
| 311 | * @tdev: tinydrm device | ||
| 312 | * | ||
| 313 | * Used in driver PM operations to suspend tinydrm. | ||
| 314 | * Suspends fbdev and DRM. | ||
| 315 | * Resume with tinydrm_resume(). | ||
| 316 | * | ||
| 317 | * Returns: | ||
| 318 | * Zero on success, negative error code on failure. | ||
| 319 | */ | ||
| 320 | int tinydrm_suspend(struct tinydrm_device *tdev) | ||
| 321 | { | ||
| 322 | struct drm_atomic_state *state; | ||
| 323 | |||
| 324 | if (tdev->suspend_state) { | ||
| 325 | DRM_ERROR("Failed to suspend: state already set\n"); | ||
| 326 | return -EINVAL; | ||
| 327 | } | ||
| 328 | |||
| 329 | drm_fbdev_cma_set_suspend_unlocked(tdev->fbdev_cma, 1); | ||
| 330 | state = drm_atomic_helper_suspend(tdev->drm); | ||
| 331 | if (IS_ERR(state)) { | ||
| 332 | drm_fbdev_cma_set_suspend_unlocked(tdev->fbdev_cma, 0); | ||
| 333 | return PTR_ERR(state); | ||
| 334 | } | ||
| 335 | |||
| 336 | tdev->suspend_state = state; | ||
| 337 | |||
| 338 | return 0; | ||
| 339 | } | ||
| 340 | EXPORT_SYMBOL(tinydrm_suspend); | ||
| 341 | |||
| 342 | /** | ||
| 343 | * tinydrm_resume - Resume tinydrm | ||
| 344 | * @tdev: tinydrm device | ||
| 345 | * | ||
| 346 | * Used in driver PM operations to resume tinydrm. | ||
| 347 | * Suspend with tinydrm_suspend(). | ||
| 348 | * | ||
| 349 | * Returns: | ||
| 350 | * Zero on success, negative error code on failure. | ||
| 351 | */ | ||
| 352 | int tinydrm_resume(struct tinydrm_device *tdev) | ||
| 353 | { | ||
| 354 | struct drm_atomic_state *state = tdev->suspend_state; | ||
| 355 | int ret; | ||
| 356 | |||
| 357 | if (!state) { | ||
| 358 | DRM_ERROR("Failed to resume: state is not set\n"); | ||
| 359 | return -EINVAL; | ||
| 360 | } | ||
| 361 | |||
| 362 | tdev->suspend_state = NULL; | ||
| 363 | |||
| 364 | ret = drm_atomic_helper_resume(tdev->drm, state); | ||
| 365 | if (ret) { | ||
| 366 | DRM_ERROR("Error resuming state: %d\n", ret); | ||
| 367 | return ret; | ||
| 368 | } | ||
| 369 | |||
| 370 | drm_fbdev_cma_set_suspend_unlocked(tdev->fbdev_cma, 0); | ||
| 371 | |||
| 372 | return 0; | ||
| 373 | } | ||
| 374 | EXPORT_SYMBOL(tinydrm_resume); | ||
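A hypothetical PM hookup for the two helpers above; this assumes the driver stored its tinydrm_device as drvdata during probe:

```c
static int __maybe_unused mydrm_pm_suspend(struct device *dev)
{
	return tinydrm_suspend(dev_get_drvdata(dev));
}

static int __maybe_unused mydrm_pm_resume(struct device *dev)
{
	return tinydrm_resume(dev_get_drvdata(dev));
}

static const struct dev_pm_ops mydrm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mydrm_pm_suspend, mydrm_pm_resume)
};
```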
| 375 | |||
| 376 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c new file mode 100644 index 000000000000..3ccda6c1e159 --- /dev/null +++ b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c | |||
| @@ -0,0 +1,460 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2016 Noralf Trønnes | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License as published by | ||
| 6 | * the Free Software Foundation; either version 2 of the License, or | ||
| 7 | * (at your option) any later version. | ||
| 8 | */ | ||
| 9 | |||
| 10 | #include <drm/tinydrm/tinydrm.h> | ||
| 11 | #include <drm/tinydrm/tinydrm-helpers.h> | ||
| 12 | #include <linux/backlight.h> | ||
| 13 | #include <linux/pm.h> | ||
| 14 | #include <linux/spi/spi.h> | ||
| 15 | #include <linux/swab.h> | ||
| 16 | |||
| 17 | static unsigned int spi_max; | ||
| 18 | module_param(spi_max, uint, 0400); | ||
| 19 | MODULE_PARM_DESC(spi_max, "Set a lower SPI max transfer size"); | ||
| 20 | |||
| 21 | /** | ||
| 22 | * tinydrm_merge_clips - Merge clip rectangles | ||
| 23 | * @dst: Destination clip rectangle | ||
| 24 | * @src: Source clip rectangle(s) | ||
| 25 | * @num_clips: Number of @src clip rectangles | ||
| 26 | * @flags: Dirty fb ioctl flags | ||
| 27 | * @max_width: Maximum width of @dst | ||
| 28 | * @max_height: Maximum height of @dst | ||
| 29 | * | ||
| 30 | * This function merges @src clip rectangle(s) into @dst. If @src is NULL, | ||
| 31 | * @max_width and @max_height are used to set a full @dst clip rectangle. | ||
| 32 | * | ||
| 33 | * Returns: | ||
| 34 | * true if it's a full clip, false otherwise | ||
| 35 | */ | ||
| 36 | bool tinydrm_merge_clips(struct drm_clip_rect *dst, | ||
| 37 | struct drm_clip_rect *src, unsigned int num_clips, | ||
| 38 | unsigned int flags, u32 max_width, u32 max_height) | ||
| 39 | { | ||
| 40 | unsigned int i; | ||
| 41 | |||
| 42 | if (!src || !num_clips) { | ||
| 43 | dst->x1 = 0; | ||
| 44 | dst->x2 = max_width; | ||
| 45 | dst->y1 = 0; | ||
| 46 | dst->y2 = max_height; | ||
| 47 | return true; | ||
| 48 | } | ||
| 49 | |||
| 50 | dst->x1 = ~0; | ||
| 51 | dst->y1 = ~0; | ||
| 52 | dst->x2 = 0; | ||
| 53 | dst->y2 = 0; | ||
| 54 | |||
| 55 | for (i = 0; i < num_clips; i++) { | ||
| 56 | if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) | ||
| 57 | i++; | ||
| 58 | dst->x1 = min(dst->x1, src[i].x1); | ||
| 59 | dst->x2 = max(dst->x2, src[i].x2); | ||
| 60 | dst->y1 = min(dst->y1, src[i].y1); | ||
| 61 | dst->y2 = max(dst->y2, src[i].y2); | ||
| 62 | } | ||
| 63 | |||
| 64 | if (dst->x2 > max_width || dst->y2 > max_height || | ||
| 65 | dst->x1 >= dst->x2 || dst->y1 >= dst->y2) { | ||
| 66 | DRM_DEBUG_KMS("Illegal clip: x1=%u, x2=%u, y1=%u, y2=%u\n", | ||
| 67 | dst->x1, dst->x2, dst->y1, dst->y2); | ||
| 68 | dst->x1 = 0; | ||
| 69 | dst->y1 = 0; | ||
| 70 | dst->x2 = max_width; | ||
| 71 | dst->y2 = max_height; | ||
| 72 | } | ||
| 73 | |||
| 74 | return (dst->x2 - dst->x1) == max_width && | ||
| 75 | (dst->y2 - dst->y1) == max_height; | ||
| 76 | } | ||
| 77 | EXPORT_SYMBOL(tinydrm_merge_clips); | ||
| 78 | |||
| 79 | /** | ||
| 80 | * tinydrm_memcpy - Copy clip buffer | ||
| 81 | * @dst: Destination buffer | ||
| 82 | * @vaddr: Source buffer | ||
| 83 | * @fb: DRM framebuffer | ||
| 84 | * @clip: Clip rectangle area to copy | ||
| 85 | */ | ||
| 86 | void tinydrm_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb, | ||
| 87 | struct drm_clip_rect *clip) | ||
| 88 | { | ||
| 89 | unsigned int cpp = drm_format_plane_cpp(fb->format->format, 0); | ||
| 90 | unsigned int pitch = fb->pitches[0]; | ||
| 91 | void *src = vaddr + (clip->y1 * pitch) + (clip->x1 * cpp); | ||
| 92 | size_t len = (clip->x2 - clip->x1) * cpp; | ||
| 93 | unsigned int y; | ||
| 94 | |||
| 95 | for (y = clip->y1; y < clip->y2; y++) { | ||
| 96 | memcpy(dst, src, len); | ||
| 97 | src += pitch; | ||
| 98 | dst += len; | ||
| 99 | } | ||
| 100 | } | ||
| 101 | EXPORT_SYMBOL(tinydrm_memcpy); | ||
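A sketch of a framebuffer ->dirty callback built on the two helpers above; the mydrm_* names and the mydrm_get_tx_buf() bounce-buffer helper are invented, but the ->dirty signature is the standard &drm_framebuffer_funcs one. tinydrm_merge_clips() also returns a bool telling the caller whether the result covers the full frame:

```c
static int mydrm_fb_dirty(struct drm_framebuffer *fb,
			  struct drm_file *file_priv,
			  unsigned int flags, unsigned int color,
			  struct drm_clip_rect *clips,
			  unsigned int num_clips)
{
	struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
	u16 *tx_buf = mydrm_get_tx_buf(fb);	/* hypothetical bounce buffer */
	struct drm_clip_rect clip;

	/* reduce userspace's clip list to one bounding rectangle */
	tinydrm_merge_clips(&clip, clips, num_clips, flags,
			    fb->width, fb->height);

	/* copy only the damaged lines out of the CMA backing store */
	tinydrm_memcpy(tx_buf, cma_obj->vaddr, fb, &clip);

	/* ...then push &clip worth of pixels to the controller... */
	return 0;
}
```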
| 102 | |||
| 103 | /** | ||
| 104 | * tinydrm_swab16 - Swap bytes into clip buffer | ||
| 105 | * @dst: RGB565 destination buffer | ||
| 106 | * @vaddr: RGB565 source buffer | ||
| 107 | * @fb: DRM framebuffer | ||
| 108 | * @clip: Clip rectangle area to copy | ||
| 109 | */ | ||
| 110 | void tinydrm_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb, | ||
| 111 | struct drm_clip_rect *clip) | ||
| 112 | { | ||
| 113 | size_t len = (clip->x2 - clip->x1) * sizeof(u16); | ||
| 114 | unsigned int x, y; | ||
| 115 | u16 *src, *buf; | ||
| 116 | |||
| 117 | /* | ||
| 118 | * The CMA memory is write-combined, so reads are uncached. | ||
| 119 | * Speed up by fetching one line at a time. | ||
| 120 | */ | ||
| 121 | buf = kmalloc(len, GFP_KERNEL); | ||
| 122 | if (!buf) | ||
| 123 | return; | ||
| 124 | |||
| 125 | for (y = clip->y1; y < clip->y2; y++) { | ||
| 126 | src = vaddr + (y * fb->pitches[0]); | ||
| 127 | src += clip->x1; | ||
| 128 | memcpy(buf, src, len); | ||
| 129 | src = buf; | ||
| 130 | for (x = clip->x1; x < clip->x2; x++) | ||
| 131 | *dst++ = swab16(*src++); | ||
| 132 | } | ||
| 133 | |||
| 134 | kfree(buf); | ||
| 135 | } | ||
| 136 | EXPORT_SYMBOL(tinydrm_swab16); | ||
| 137 | |||
| 138 | /** | ||
| 139 | * tinydrm_xrgb8888_to_rgb565 - Convert XRGB8888 to RGB565 clip buffer | ||
| 140 | * @dst: RGB565 destination buffer | ||
| 141 | * @vaddr: XRGB8888 source buffer | ||
| 142 | * @fb: DRM framebuffer | ||
| 143 | * @clip: Clip rectangle area to copy | ||
| 144 | * @swap: Swap bytes | ||
| 145 | * | ||
| 146 | * Drivers can use this function for RGB565 devices that don't natively | ||
| 147 | * support XRGB8888. | ||
| 148 | */ | ||
| 149 | void tinydrm_xrgb8888_to_rgb565(u16 *dst, void *vaddr, | ||
| 150 | struct drm_framebuffer *fb, | ||
| 151 | struct drm_clip_rect *clip, bool swap) | ||
| 152 | { | ||
| 153 | size_t len = (clip->x2 - clip->x1) * sizeof(u32); | ||
| 154 | unsigned int x, y; | ||
| 155 | u32 *src, *buf; | ||
| 156 | u16 val16; | ||
| 157 | |||
| 158 | buf = kmalloc(len, GFP_KERNEL); | ||
| 159 | if (!buf) | ||
| 160 | return; | ||
| 161 | |||
| 162 | for (y = clip->y1; y < clip->y2; y++) { | ||
| 163 | src = vaddr + (y * fb->pitches[0]); | ||
| 164 | src += clip->x1; | ||
| 165 | memcpy(buf, src, len); | ||
| 166 | src = buf; | ||
| 167 | for (x = clip->x1; x < clip->x2; x++) { | ||
| 168 | val16 = ((*src & 0x00F80000) >> 8) | | ||
| 169 | ((*src & 0x0000FC00) >> 5) | | ||
| 170 | ((*src & 0x000000F8) >> 3); | ||
| 171 | src++; | ||
| 172 | if (swap) | ||
| 173 | *dst++ = swab16(val16); | ||
| 174 | else | ||
| 175 | *dst++ = val16; | ||
| 176 | } | ||
| 177 | } | ||
| 178 | |||
| 179 | kfree(buf); | ||
| 180 | } | ||
| 181 | EXPORT_SYMBOL(tinydrm_xrgb8888_to_rgb565); | ||
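
Editor's note: to make the masking above concrete, here is the packing worked out by hand for one pixel (an illustrative value, not from the source):

/*
 * XRGB8888 -> RGB565 for the pixel 0x00FF8040 (R=0xFF, G=0x80, B=0x40):
 *   (0x00FF8040 & 0x00F80000) >> 8 = 0xF800  R: upper 5 bits -> bits 15:11
 *   (0x00FF8040 & 0x0000FC00) >> 5 = 0x0400  G: upper 6 bits -> bits 10:5
 *   (0x00FF8040 & 0x000000F8) >> 3 = 0x0008  B: upper 5 bits -> bits 4:0
 * OR'ed together: 0xF800 | 0x0400 | 0x0008 = 0xFC08
 */
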
| 182 | |||
| 183 | /** | ||
| 184 | * tinydrm_of_find_backlight - Find backlight device in device-tree | ||
| 185 | * @dev: Device | ||
| 186 | * | ||
| 187 | * This function looks for a DT node pointed to by a property named 'backlight' | ||
| 188 | * and uses of_find_backlight_by_node() to get the backlight device. | ||
| 189 | * Additionally, if the brightness property is zero, it is set to | ||
| 190 | * max_brightness. | ||
| 191 | * | ||
| 192 | * Returns: | ||
| 193 | * NULL if there's no backlight property. | ||
| 194 | * Error pointer -EPROBE_DEFER if the DT node is found, but the backlight | ||
| 195 | * device has not been registered yet. | ||
| 196 | * A pointer to the backlight device on success. | ||
| 197 | */ | ||
| 198 | struct backlight_device *tinydrm_of_find_backlight(struct device *dev) | ||
| 199 | { | ||
| 200 | struct backlight_device *backlight; | ||
| 201 | struct device_node *np; | ||
| 202 | |||
| 203 | np = of_parse_phandle(dev->of_node, "backlight", 0); | ||
| 204 | if (!np) | ||
| 205 | return NULL; | ||
| 206 | |||
| 207 | backlight = of_find_backlight_by_node(np); | ||
| 208 | of_node_put(np); | ||
| 209 | |||
| 210 | if (!backlight) | ||
| 211 | return ERR_PTR(-EPROBE_DEFER); | ||
| 212 | |||
| 213 | if (!backlight->props.brightness) { | ||
| 214 | backlight->props.brightness = backlight->props.max_brightness; | ||
| 215 | DRM_DEBUG_KMS("Backlight brightness set to %d\n", | ||
| 216 | backlight->props.brightness); | ||
| 217 | } | ||
| 218 | |||
| 219 | return backlight; | ||
| 220 | } | ||
| 221 | EXPORT_SYMBOL(tinydrm_of_find_backlight); | ||
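
Editor's note: at probe time the error pointer must be propagated so probing is retried once the backlight driver appears. A minimal sketch, assuming a driver-private struct 'priv' with a backlight member:

/* Sketch of probe-time usage; 'priv' is a hypothetical driver struct. */
priv->backlight = tinydrm_of_find_backlight(dev);
if (IS_ERR(priv->backlight))
	return PTR_ERR(priv->backlight);	/* typically -EPROBE_DEFER */
/* NULL is fine: the enable/disable helpers below accept a missing backlight */
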
| 222 | |||
| 223 | /** | ||
| 224 | * tinydrm_enable_backlight - Enable backlight helper | ||
| 225 | * @backlight: Backlight device | ||
| 226 | * | ||
| 227 | * Returns: | ||
| 228 | * Zero on success, negative error code on failure. | ||
| 229 | */ | ||
| 230 | int tinydrm_enable_backlight(struct backlight_device *backlight) | ||
| 231 | { | ||
| 232 | unsigned int old_state; | ||
| 233 | int ret; | ||
| 234 | |||
| 235 | if (!backlight) | ||
| 236 | return 0; | ||
| 237 | |||
| 238 | old_state = backlight->props.state; | ||
| 239 | backlight->props.state &= ~BL_CORE_FBBLANK; | ||
| 240 | DRM_DEBUG_KMS("Backlight state: 0x%x -> 0x%x\n", old_state, | ||
| 241 | backlight->props.state); | ||
| 242 | |||
| 243 | ret = backlight_update_status(backlight); | ||
| 244 | if (ret) | ||
| 245 | DRM_ERROR("Failed to enable backlight %d\n", ret); | ||
| 246 | |||
| 247 | return ret; | ||
| 248 | } | ||
| 249 | EXPORT_SYMBOL(tinydrm_enable_backlight); | ||
| 250 | |||
| 251 | /** | ||
| 252 | * tinydrm_disable_backlight - Disable backlight helper | ||
| 253 | * @backlight: Backlight device | ||
| 254 | * | ||
| 255 | * Returns: | ||
| 256 | * Zero on success, negative error code on failure. | ||
| 257 | */ | ||
| 258 | int tinydrm_disable_backlight(struct backlight_device *backlight) | ||
| 259 | { | ||
| 260 | unsigned int old_state; | ||
| 261 | int ret; | ||
| 262 | |||
| 263 | if (!backlight) | ||
| 264 | return 0; | ||
| 265 | |||
| 266 | old_state = backlight->props.state; | ||
| 267 | backlight->props.state |= BL_CORE_FBBLANK; | ||
| 268 | DRM_DEBUG_KMS("Backlight state: 0x%x -> 0x%x\n", old_state, | ||
| 269 | backlight->props.state); | ||
| 270 | ret = backlight_update_status(backlight); | ||
| 271 | if (ret) | ||
| 272 | DRM_ERROR("Failed to disable backlight %d\n", ret); | ||
| 273 | |||
| 274 | return ret; | ||
| 275 | } | ||
| 276 | EXPORT_SYMBOL(tinydrm_disable_backlight); | ||
| 277 | |||
| 278 | #if IS_ENABLED(CONFIG_SPI) | ||
| 279 | |||
| 280 | /** | ||
| 281 | * tinydrm_spi_max_transfer_size - Determine max SPI transfer size | ||
| 282 | * @spi: SPI device | ||
| 283 | * @max_len: Maximum buffer size needed (optional) | ||
| 284 | * | ||
| 285 | * This function returns the maximum size to use for SPI transfers. It is the | ||
| 286 | * smallest of the SPI master limit, the optional @max_len and the spi_max | ||
| 287 | * module parameter, rounded down to a multiple of 4 (but never below 4). | ||
| 288 | * | ||
| 289 | * Returns: | ||
| 290 | * Maximum size for SPI transfers | ||
| 291 | */ | ||
| 292 | size_t tinydrm_spi_max_transfer_size(struct spi_device *spi, size_t max_len) | ||
| 293 | { | ||
| 294 | size_t ret; | ||
| 295 | |||
| 296 | ret = min(spi_max_transfer_size(spi), spi->master->max_dma_len); | ||
| 297 | if (max_len) | ||
| 298 | ret = min(ret, max_len); | ||
| 299 | if (spi_max) | ||
| 300 | ret = min_t(size_t, ret, spi_max); | ||
| 301 | ret &= ~0x3; | ||
| 302 | if (ret < 4) | ||
| 303 | ret = 4; | ||
| 304 | |||
| 305 | return ret; | ||
| 306 | } | ||
| 307 | EXPORT_SYMBOL(tinydrm_spi_max_transfer_size); | ||
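
Editor's note: a worked example of the clamping, with illustrative numbers:

/*
 * master limit 65536, max_dma_len 65535, max_len 0, spi_max=100:
 *   min(65536, 65535) = 65535; min(65535, 100) = 100; 100 & ~0x3 = 100.
 * With spi_max=10 instead: 10 & ~0x3 = 8. Results below 4 are raised to 4.
 */
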
| 308 | |||
| 309 | /** | ||
| 310 | * tinydrm_spi_bpw_supported - Check if bits per word is supported | ||
| 311 | * @spi: SPI device | ||
| 312 | * @bpw: Bits per word | ||
| 313 | * | ||
| 314 | * This function checks to see if the SPI master driver supports @bpw. | ||
| 315 | * | ||
| 316 | * Returns: | ||
| 317 | * True if @bpw is supported, false otherwise. | ||
| 318 | */ | ||
| 319 | bool tinydrm_spi_bpw_supported(struct spi_device *spi, u8 bpw) | ||
| 320 | { | ||
| 321 | u32 bpw_mask = spi->master->bits_per_word_mask; | ||
| 322 | |||
| 323 | if (bpw == 8) | ||
| 324 | return true; | ||
| 325 | |||
| 326 | if (!bpw_mask) { | ||
| 327 | dev_warn_once(&spi->dev, | ||
| 328 | "bits_per_word_mask not set, assume 8-bit only\n"); | ||
| 329 | return false; | ||
| 330 | } | ||
| 331 | |||
| 332 | if (bpw_mask & SPI_BPW_MASK(bpw)) | ||
| 333 | return true; | ||
| 334 | |||
| 335 | return false; | ||
| 336 | } | ||
| 337 | EXPORT_SYMBOL(tinydrm_spi_bpw_supported); | ||
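
Editor's note: for reference, SPI_BPW_MASK(n) from <linux/spi/spi.h> is BIT(n - 1), so a controller driver advertising both 8- and 9-bit words would set:

/* A master supporting 8- and 9-bit words: BIT(7) | BIT(8) = 0x180 */
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(9);
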
| 338 | |||
| 339 | static void | ||
| 340 | tinydrm_dbg_spi_print(struct spi_device *spi, struct spi_transfer *tr, | ||
| 341 | const void *buf, int idx, bool tx) | ||
| 342 | { | ||
| 343 | u32 speed_hz = tr->speed_hz ? tr->speed_hz : spi->max_speed_hz; | ||
| 344 | char linebuf[3 * 32]; | ||
| 345 | |||
| 346 | hex_dump_to_buffer(buf, tr->len, 16, | ||
| 347 | DIV_ROUND_UP(tr->bits_per_word, 8), | ||
| 348 | linebuf, sizeof(linebuf), false); | ||
| 349 | |||
| 350 | printk(KERN_DEBUG | ||
| 351 | " tr(%i): speed=%u%s, bpw=%i, len=%u, %s_buf=[%s%s]\n", idx, | ||
| 352 | speed_hz > 1000000 ? speed_hz / 1000000 : speed_hz / 1000, | ||
| 353 | speed_hz > 1000000 ? "MHz" : "kHz", tr->bits_per_word, tr->len, | ||
| 354 | tx ? "tx" : "rx", linebuf, tr->len > 16 ? " ..." : ""); | ||
| 355 | } | ||
| 356 | |||
| 357 | /* called through tinydrm_dbg_spi_message() */ | ||
| 358 | void _tinydrm_dbg_spi_message(struct spi_device *spi, struct spi_message *m) | ||
| 359 | { | ||
| 360 | struct spi_transfer *tmp; | ||
| 361 | struct list_head *pos; | ||
| 362 | int i = 0; | ||
| 363 | |||
| 364 | list_for_each(pos, &m->transfers) { | ||
| 365 | tmp = list_entry(pos, struct spi_transfer, transfer_list); | ||
| 366 | |||
| 367 | if (tmp->tx_buf) | ||
| 368 | tinydrm_dbg_spi_print(spi, tmp, tmp->tx_buf, i, true); | ||
| 369 | if (tmp->rx_buf) | ||
| 370 | tinydrm_dbg_spi_print(spi, tmp, tmp->rx_buf, i, false); | ||
| 371 | i++; | ||
| 372 | } | ||
| 373 | } | ||
| 374 | EXPORT_SYMBOL(_tinydrm_dbg_spi_message); | ||
| 375 | |||
| 376 | /** | ||
| 377 | * tinydrm_spi_transfer - SPI transfer helper | ||
| 378 | * @spi: SPI device | ||
| 379 | * @speed_hz: Override speed (optional) | ||
| 380 | * @header: Optional header transfer | ||
| 381 | * @bpw: Bits per word | ||
| 382 | * @buf: Buffer to transfer | ||
| 383 | * @len: Buffer length | ||
| 384 | * | ||
| 385 | * This SPI transfer helper breaks up the transfer of @buf into chunks which | ||
| 386 | * the SPI master driver can handle. If the machine is Little Endian and the | ||
| 387 | * SPI master driver doesn't support 16 bits per word, it swaps the bytes and | ||
| 388 | * does an 8-bit transfer. | ||
| 389 | * If @header is set, it is prepended to each SPI message. | ||
| 390 | * | ||
| 391 | * Returns: | ||
| 392 | * Zero on success, negative error code on failure. | ||
| 393 | */ | ||
| 394 | int tinydrm_spi_transfer(struct spi_device *spi, u32 speed_hz, | ||
| 395 | struct spi_transfer *header, u8 bpw, const void *buf, | ||
| 396 | size_t len) | ||
| 397 | { | ||
| 398 | struct spi_transfer tr = { | ||
| 399 | .bits_per_word = bpw, | ||
| 400 | .speed_hz = speed_hz, | ||
| 401 | }; | ||
| 402 | struct spi_message m; | ||
| 403 | u16 *swap_buf = NULL; | ||
| 404 | size_t max_chunk; | ||
| 405 | size_t chunk; | ||
| 406 | int ret = 0; | ||
| 407 | |||
| 408 | if (WARN_ON_ONCE(bpw != 8 && bpw != 16)) | ||
| 409 | return -EINVAL; | ||
| 410 | |||
| 411 | max_chunk = tinydrm_spi_max_transfer_size(spi, 0); | ||
| 412 | |||
| 413 | if (drm_debug & DRM_UT_DRIVER) | ||
| 414 | pr_debug("[drm:%s] bpw=%u, max_chunk=%zu, transfers:\n", | ||
| 415 | __func__, bpw, max_chunk); | ||
| 416 | |||
| 417 | if (bpw == 16 && !tinydrm_spi_bpw_supported(spi, 16)) { | ||
| 418 | tr.bits_per_word = 8; | ||
| 419 | if (tinydrm_machine_little_endian()) { | ||
| 420 | swap_buf = kmalloc(min(len, max_chunk), GFP_KERNEL); | ||
| 421 | if (!swap_buf) | ||
| 422 | return -ENOMEM; | ||
| 423 | } | ||
| 424 | } | ||
| 425 | |||
| 426 | spi_message_init(&m); | ||
| 427 | if (header) | ||
| 428 | spi_message_add_tail(header, &m); | ||
| 429 | spi_message_add_tail(&tr, &m); | ||
| 430 | |||
| 431 | while (len) { | ||
| 432 | chunk = min(len, max_chunk); | ||
| 433 | |||
| 434 | tr.tx_buf = buf; | ||
| 435 | tr.len = chunk; | ||
| 436 | |||
| 437 | if (swap_buf) { | ||
| 438 | const u16 *buf16 = buf; | ||
| 439 | unsigned int i; | ||
| 440 | |||
| 441 | for (i = 0; i < chunk / 2; i++) | ||
| 442 | swap_buf[i] = swab16(buf16[i]); | ||
| 443 | |||
| 444 | tr.tx_buf = swap_buf; | ||
| 445 | } | ||
| 446 | |||
| 447 | buf += chunk; | ||
| 448 | len -= chunk; | ||
| 449 | |||
| 450 | tinydrm_dbg_spi_message(spi, &m); | ||
| 451 | ret = spi_sync(spi, &m); | ||
| 452 | if (ret) | ||
| 453 | return ret; | ||
| 454 | } | ||
| 455 | |||
| 456 | return 0; | ||
| 457 | } | ||
| 458 | EXPORT_SYMBOL(tinydrm_spi_transfer); | ||
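
Editor's note: a typical use is pushing an RGB565 pixel buffer. A minimal sketch (speed_hz=0 keeps the device's default speed, no header transfer):

static int my_send_pixels(struct spi_device *spi, const u16 *pixels,
			  size_t npixels)
{
	/* The helper chunks the buffer and byte swaps on 8-bit-only masters */
	return tinydrm_spi_transfer(spi, 0, NULL, 16, pixels,
				    npixels * sizeof(u16));
}
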
| 459 | |||
| 460 | #endif /* CONFIG_SPI */ | ||
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c new file mode 100644 index 000000000000..ec43fb7ad9e4 --- /dev/null +++ b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c | |||
| @@ -0,0 +1,234 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2016 Noralf Trønnes | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License as published by | ||
| 6 | * the Free Software Foundation; either version 2 of the License, or | ||
| 7 | * (at your option) any later version. | ||
| 8 | */ | ||
| 9 | |||
| 10 | #include <drm/drm_atomic_helper.h> | ||
| 11 | #include <drm/drm_crtc_helper.h> | ||
| 12 | #include <drm/drm_modes.h> | ||
| 13 | #include <drm/tinydrm/tinydrm.h> | ||
| 14 | |||
| 15 | struct tinydrm_connector { | ||
| 16 | struct drm_connector base; | ||
| 17 | const struct drm_display_mode *mode; | ||
| 18 | }; | ||
| 19 | |||
| 20 | static inline struct tinydrm_connector * | ||
| 21 | to_tinydrm_connector(struct drm_connector *connector) | ||
| 22 | { | ||
| 23 | return container_of(connector, struct tinydrm_connector, base); | ||
| 24 | } | ||
| 25 | |||
| 26 | static int tinydrm_connector_get_modes(struct drm_connector *connector) | ||
| 27 | { | ||
| 28 | struct tinydrm_connector *tconn = to_tinydrm_connector(connector); | ||
| 29 | struct drm_display_mode *mode; | ||
| 30 | |||
| 31 | mode = drm_mode_duplicate(connector->dev, tconn->mode); | ||
| 32 | if (!mode) { | ||
| 33 | DRM_ERROR("Failed to duplicate mode\n"); | ||
| 34 | return 0; | ||
| 35 | } | ||
| 36 | |||
| 37 | if (mode->name[0] == '\0') | ||
| 38 | drm_mode_set_name(mode); | ||
| 39 | |||
| 40 | mode->type |= DRM_MODE_TYPE_PREFERRED; | ||
| 41 | drm_mode_probed_add(connector, mode); | ||
| 42 | |||
| 43 | if (mode->width_mm) { | ||
| 44 | connector->display_info.width_mm = mode->width_mm; | ||
| 45 | connector->display_info.height_mm = mode->height_mm; | ||
| 46 | } | ||
| 47 | |||
| 48 | return 1; | ||
| 49 | } | ||
| 50 | |||
| 51 | static const struct drm_connector_helper_funcs tinydrm_connector_hfuncs = { | ||
| 52 | .get_modes = tinydrm_connector_get_modes, | ||
| 53 | .best_encoder = drm_atomic_helper_best_encoder, | ||
| 54 | }; | ||
| 55 | |||
| 56 | static enum drm_connector_status | ||
| 57 | tinydrm_connector_detect(struct drm_connector *connector, bool force) | ||
| 58 | { | ||
| 59 | if (drm_device_is_unplugged(connector->dev)) | ||
| 60 | return connector_status_disconnected; | ||
| 61 | |||
| 62 | return connector->status; | ||
| 63 | } | ||
| 64 | |||
| 65 | static void tinydrm_connector_destroy(struct drm_connector *connector) | ||
| 66 | { | ||
| 67 | struct tinydrm_connector *tconn = to_tinydrm_connector(connector); | ||
| 68 | |||
| 69 | drm_connector_cleanup(connector); | ||
| 70 | kfree(tconn); | ||
| 71 | } | ||
| 72 | |||
| 73 | static const struct drm_connector_funcs tinydrm_connector_funcs = { | ||
| 74 | .dpms = drm_atomic_helper_connector_dpms, | ||
| 75 | .reset = drm_atomic_helper_connector_reset, | ||
| 76 | .detect = tinydrm_connector_detect, | ||
| 77 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
| 78 | .destroy = tinydrm_connector_destroy, | ||
| 79 | .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, | ||
| 80 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, | ||
| 81 | }; | ||
| 82 | |||
| 83 | struct drm_connector * | ||
| 84 | tinydrm_connector_create(struct drm_device *drm, | ||
| 85 | const struct drm_display_mode *mode, | ||
| 86 | int connector_type) | ||
| 87 | { | ||
| 88 | struct tinydrm_connector *tconn; | ||
| 89 | struct drm_connector *connector; | ||
| 90 | int ret; | ||
| 91 | |||
| 92 | tconn = kzalloc(sizeof(*tconn), GFP_KERNEL); | ||
| 93 | if (!tconn) | ||
| 94 | return ERR_PTR(-ENOMEM); | ||
| 95 | |||
| 96 | tconn->mode = mode; | ||
| 97 | connector = &tconn->base; | ||
| 98 | |||
| 99 | drm_connector_helper_add(connector, &tinydrm_connector_hfuncs); | ||
| 100 | ret = drm_connector_init(drm, connector, &tinydrm_connector_funcs, | ||
| 101 | connector_type); | ||
| 102 | if (ret) { | ||
| 103 | kfree(tconn); | ||
| 104 | return ERR_PTR(ret); | ||
| 105 | } | ||
| 106 | |||
| 107 | connector->status = connector_status_connected; | ||
| 108 | |||
| 109 | return connector; | ||
| 110 | } | ||
| 111 | |||
| 112 | /** | ||
| 113 | * tinydrm_display_pipe_update - Display pipe update helper | ||
| 114 | * @pipe: Simple display pipe | ||
| 115 | * @old_state: Old plane state | ||
| 116 | * | ||
| 117 | * This function does a full framebuffer flush if the plane framebuffer | ||
| 118 | * has changed. It also handles vblank events. Drivers can use this as their | ||
| 119 | * &drm_simple_display_pipe_funcs->update callback. | ||
| 120 | */ | ||
| 121 | void tinydrm_display_pipe_update(struct drm_simple_display_pipe *pipe, | ||
| 122 | struct drm_plane_state *old_state) | ||
| 123 | { | ||
| 124 | struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); | ||
| 125 | struct drm_framebuffer *fb = pipe->plane.state->fb; | ||
| 126 | struct drm_crtc *crtc = &tdev->pipe.crtc; | ||
| 127 | |||
| 128 | if (fb && (fb != old_state->fb)) { | ||
| 129 | pipe->plane.fb = fb; | ||
| 130 | if (fb->funcs->dirty) | ||
| 131 | fb->funcs->dirty(fb, NULL, 0, 0, NULL, 0); | ||
| 132 | } | ||
| 133 | |||
| 134 | if (crtc->state->event) { | ||
| 135 | spin_lock_irq(&crtc->dev->event_lock); | ||
| 136 | drm_crtc_send_vblank_event(crtc, crtc->state->event); | ||
| 137 | spin_unlock_irq(&crtc->dev->event_lock); | ||
| 138 | crtc->state->event = NULL; | ||
| 139 | } | ||
| 140 | } | ||
| 141 | EXPORT_SYMBOL(tinydrm_display_pipe_update); | ||
| 142 | |||
| 143 | /** | ||
| 144 | * tinydrm_display_pipe_prepare_fb - Display pipe prepare_fb helper | ||
| 145 | * @pipe: Simple display pipe | ||
| 146 | * @plane_state: Plane state | ||
| 147 | * | ||
| 148 | * This function uses drm_fb_cma_prepare_fb() to check if the plane FB has a | ||
| 149 | * dma-buf attached, extracts the exclusive fence and attaches it to the plane | ||
| 150 | * state for the atomic helper to wait on. Drivers can use this as their | ||
| 151 | * &drm_simple_display_pipe_funcs->prepare_fb callback. | ||
| 152 | */ | ||
| 153 | int tinydrm_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe, | ||
| 154 | struct drm_plane_state *plane_state) | ||
| 155 | { | ||
| 156 | return drm_fb_cma_prepare_fb(&pipe->plane, plane_state); | ||
| 157 | } | ||
| 158 | EXPORT_SYMBOL(tinydrm_display_pipe_prepare_fb); | ||
| 159 | |||
| 160 | static int tinydrm_rotate_mode(struct drm_display_mode *mode, | ||
| 161 | unsigned int rotation) | ||
| 162 | { | ||
| 163 | if (rotation == 0 || rotation == 180) { | ||
| 164 | return 0; | ||
| 165 | } else if (rotation == 90 || rotation == 270) { | ||
| 166 | swap(mode->hdisplay, mode->vdisplay); | ||
| 167 | swap(mode->hsync_start, mode->vsync_start); | ||
| 168 | swap(mode->hsync_end, mode->vsync_end); | ||
| 169 | swap(mode->htotal, mode->vtotal); | ||
| 170 | swap(mode->width_mm, mode->height_mm); | ||
| 171 | return 0; | ||
| 172 | } else { | ||
| 173 | return -EINVAL; | ||
| 174 | } | ||
| 175 | } | ||
| 176 | |||
| 177 | /** | ||
| 178 | * tinydrm_display_pipe_init - Initialize display pipe | ||
| 179 | * @tdev: tinydrm device | ||
| 180 | * @funcs: Display pipe functions | ||
| 181 | * @connector_type: Connector type | ||
| 182 | * @formats: Array of supported formats (DRM_FORMAT\_\*) | ||
| 183 | * @format_count: Number of elements in @formats | ||
| 184 | * @mode: Supported mode | ||
| 185 | * @rotation: Initial @mode rotation in degrees counterclockwise | ||
| 186 | * | ||
| 187 | * This function sets up a &drm_simple_display_pipe with a &drm_connector that | ||
| 188 | * has one fixed &drm_display_mode which is rotated according to @rotation. | ||
| 189 | * | ||
| 190 | * Returns: | ||
| 191 | * Zero on success, negative error code on failure. | ||
| 192 | */ | ||
| 193 | int | ||
| 194 | tinydrm_display_pipe_init(struct tinydrm_device *tdev, | ||
| 195 | const struct drm_simple_display_pipe_funcs *funcs, | ||
| 196 | int connector_type, | ||
| 197 | const uint32_t *formats, | ||
| 198 | unsigned int format_count, | ||
| 199 | const struct drm_display_mode *mode, | ||
| 200 | unsigned int rotation) | ||
| 201 | { | ||
| 202 | struct drm_device *drm = tdev->drm; | ||
| 203 | struct drm_display_mode *mode_copy; | ||
| 204 | struct drm_connector *connector; | ||
| 205 | int ret; | ||
| 206 | |||
| 207 | mode_copy = devm_kmalloc(drm->dev, sizeof(*mode_copy), GFP_KERNEL); | ||
| 208 | if (!mode_copy) | ||
| 209 | return -ENOMEM; | ||
| 210 | |||
| 211 | *mode_copy = *mode; | ||
| 212 | ret = tinydrm_rotate_mode(mode_copy, rotation); | ||
| 213 | if (ret) { | ||
| 214 | DRM_ERROR("Illegal rotation value %u\n", rotation); | ||
| 215 | return -EINVAL; | ||
| 216 | } | ||
| 217 | |||
| 218 | drm->mode_config.min_width = mode_copy->hdisplay; | ||
| 219 | drm->mode_config.max_width = mode_copy->hdisplay; | ||
| 220 | drm->mode_config.min_height = mode_copy->vdisplay; | ||
| 221 | drm->mode_config.max_height = mode_copy->vdisplay; | ||
| 222 | |||
| 223 | connector = tinydrm_connector_create(drm, mode_copy, connector_type); | ||
| 224 | if (IS_ERR(connector)) | ||
| 225 | return PTR_ERR(connector); | ||
| 226 | |||
| 227 | ret = drm_simple_display_pipe_init(drm, &tdev->pipe, funcs, formats, | ||
| 228 | format_count, connector); | ||
| 229 | if (ret) | ||
| 230 | return ret; | ||
| 231 | |||
| 232 | return 0; | ||
| 233 | } | ||
| 234 | EXPORT_SYMBOL(tinydrm_display_pipe_init); | ||
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c new file mode 100644 index 000000000000..b29fe86158f7 --- /dev/null +++ b/drivers/gpu/drm/tinydrm/mi0283qt.c | |||
| @@ -0,0 +1,279 @@ | |||
| 1 | /* | ||
| 2 | * DRM driver for Multi-Inno MI0283QT panels | ||
| 3 | * | ||
| 4 | * Copyright 2016 Noralf Trønnes | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <drm/tinydrm/ili9341.h> | ||
| 13 | #include <drm/tinydrm/mipi-dbi.h> | ||
| 14 | #include <drm/tinydrm/tinydrm-helpers.h> | ||
| 15 | #include <linux/delay.h> | ||
| 16 | #include <linux/gpio/consumer.h> | ||
| 17 | #include <linux/module.h> | ||
| 18 | #include <linux/property.h> | ||
| 19 | #include <linux/regulator/consumer.h> | ||
| 20 | #include <linux/spi/spi.h> | ||
| 21 | #include <video/mipi_display.h> | ||
| 22 | |||
| 23 | static int mi0283qt_init(struct mipi_dbi *mipi) | ||
| 24 | { | ||
| 25 | struct tinydrm_device *tdev = &mipi->tinydrm; | ||
| 26 | struct device *dev = tdev->drm->dev; | ||
| 27 | u8 addr_mode; | ||
| 28 | int ret; | ||
| 29 | |||
| 30 | DRM_DEBUG_KMS("\n"); | ||
| 31 | |||
| 32 | ret = regulator_enable(mipi->regulator); | ||
| 33 | if (ret) { | ||
| 34 | dev_err(dev, "Failed to enable regulator %d\n", ret); | ||
| 35 | return ret; | ||
| 36 | } | ||
| 37 | |||
| 38 | /* Avoid flicker by skipping setup if the bootloader has done it */ | ||
| 39 | if (mipi_dbi_display_is_on(mipi)) | ||
| 40 | return 0; | ||
| 41 | |||
| 42 | mipi_dbi_hw_reset(mipi); | ||
| 43 | ret = mipi_dbi_command(mipi, MIPI_DCS_SOFT_RESET); | ||
| 44 | if (ret) { | ||
| 45 | dev_err(dev, "Error sending command %d\n", ret); | ||
| 46 | regulator_disable(mipi->regulator); | ||
| 47 | return ret; | ||
| 48 | } | ||
| 49 | |||
| 50 | msleep(20); | ||
| 51 | |||
| 52 | mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF); | ||
| 53 | |||
| 54 | mipi_dbi_command(mipi, ILI9341_PWCTRLB, 0x00, 0x83, 0x30); | ||
| 55 | mipi_dbi_command(mipi, ILI9341_PWRSEQ, 0x64, 0x03, 0x12, 0x81); | ||
| 56 | mipi_dbi_command(mipi, ILI9341_DTCTRLA, 0x85, 0x01, 0x79); | ||
| 57 | mipi_dbi_command(mipi, ILI9341_PWCTRLA, 0x39, 0x2c, 0x00, 0x34, 0x02); | ||
| 58 | mipi_dbi_command(mipi, ILI9341_PUMPCTRL, 0x20); | ||
| 59 | mipi_dbi_command(mipi, ILI9341_DTCTRLB, 0x00, 0x00); | ||
| 60 | |||
| 61 | /* Power Control */ | ||
| 62 | mipi_dbi_command(mipi, ILI9341_PWCTRL1, 0x26); | ||
| 63 | mipi_dbi_command(mipi, ILI9341_PWCTRL2, 0x11); | ||
| 64 | /* VCOM */ | ||
| 65 | mipi_dbi_command(mipi, ILI9341_VMCTRL1, 0x35, 0x3e); | ||
| 66 | mipi_dbi_command(mipi, ILI9341_VMCTRL2, 0xbe); | ||
| 67 | |||
| 68 | /* Memory Access Control */ | ||
| 69 | mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT, 0x55); | ||
| 70 | |||
| 71 | switch (mipi->rotation) { | ||
| 72 | default: | ||
| 73 | addr_mode = ILI9341_MADCTL_MV | ILI9341_MADCTL_MY | | ||
| 74 | ILI9341_MADCTL_MX; | ||
| 75 | break; | ||
| 76 | case 90: | ||
| 77 | addr_mode = ILI9341_MADCTL_MY; | ||
| 78 | break; | ||
| 79 | case 180: | ||
| 80 | addr_mode = ILI9341_MADCTL_MV; | ||
| 81 | break; | ||
| 82 | case 270: | ||
| 83 | addr_mode = ILI9341_MADCTL_MX; | ||
| 84 | break; | ||
| 85 | } | ||
| 86 | addr_mode |= ILI9341_MADCTL_BGR; | ||
| 87 | mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode); | ||
| 88 | |||
| 89 | /* Frame Rate */ | ||
| 90 | mipi_dbi_command(mipi, ILI9341_FRMCTR1, 0x00, 0x1b); | ||
| 91 | |||
| 92 | /* Gamma */ | ||
| 93 | mipi_dbi_command(mipi, ILI9341_EN3GAM, 0x08); | ||
| 94 | mipi_dbi_command(mipi, MIPI_DCS_SET_GAMMA_CURVE, 0x01); | ||
| 95 | mipi_dbi_command(mipi, ILI9341_PGAMCTRL, | ||
| 96 | 0x1f, 0x1a, 0x18, 0x0a, 0x0f, 0x06, 0x45, 0x87, | ||
| 97 | 0x32, 0x0a, 0x07, 0x02, 0x07, 0x05, 0x00); | ||
| 98 | mipi_dbi_command(mipi, ILI9341_NGAMCTRL, | ||
| 99 | 0x00, 0x25, 0x27, 0x05, 0x10, 0x09, 0x3a, 0x78, | ||
| 100 | 0x4d, 0x05, 0x18, 0x0d, 0x38, 0x3a, 0x1f); | ||
| 101 | |||
| 102 | /* DDRAM */ | ||
| 103 | mipi_dbi_command(mipi, ILI9341_ETMOD, 0x07); | ||
| 104 | |||
| 105 | /* Display */ | ||
| 106 | mipi_dbi_command(mipi, ILI9341_DISCTRL, 0x0a, 0x82, 0x27, 0x00); | ||
| 107 | mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE); | ||
| 108 | msleep(100); | ||
| 109 | |||
| 110 | mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON); | ||
| 111 | msleep(100); | ||
| 112 | |||
| 113 | return 0; | ||
| 114 | } | ||
| 115 | |||
| 116 | static void mi0283qt_fini(void *data) | ||
| 117 | { | ||
| 118 | struct mipi_dbi *mipi = data; | ||
| 119 | |||
| 120 | DRM_DEBUG_KMS("\n"); | ||
| 121 | regulator_disable(mipi->regulator); | ||
| 122 | } | ||
| 123 | |||
| 124 | static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = { | ||
| 125 | .enable = mipi_dbi_pipe_enable, | ||
| 126 | .disable = mipi_dbi_pipe_disable, | ||
| 127 | .update = tinydrm_display_pipe_update, | ||
| 128 | .prepare_fb = tinydrm_display_pipe_prepare_fb, | ||
| 129 | }; | ||
| 130 | |||
| 131 | static const struct drm_display_mode mi0283qt_mode = { | ||
| 132 | TINYDRM_MODE(320, 240, 58, 43), | ||
| 133 | }; | ||
| 134 | |||
| 135 | static struct drm_driver mi0283qt_driver = { | ||
| 136 | .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | | ||
| 137 | DRIVER_ATOMIC, | ||
| 138 | TINYDRM_GEM_DRIVER_OPS, | ||
| 139 | .lastclose = tinydrm_lastclose, | ||
| 140 | .debugfs_init = mipi_dbi_debugfs_init, | ||
| 141 | .name = "mi0283qt", | ||
| 142 | .desc = "Multi-Inno MI0283QT", | ||
| 143 | .date = "20160614", | ||
| 144 | .major = 1, | ||
| 145 | .minor = 0, | ||
| 146 | }; | ||
| 147 | |||
| 148 | static const struct of_device_id mi0283qt_of_match[] = { | ||
| 149 | { .compatible = "multi-inno,mi0283qt" }, | ||
| 150 | {}, | ||
| 151 | }; | ||
| 152 | MODULE_DEVICE_TABLE(of, mi0283qt_of_match); | ||
| 153 | |||
| 154 | static const struct spi_device_id mi0283qt_id[] = { | ||
| 155 | { "mi0283qt", 0 }, | ||
| 156 | { }, | ||
| 157 | }; | ||
| 158 | MODULE_DEVICE_TABLE(spi, mi0283qt_id); | ||
| 159 | |||
| 160 | static int mi0283qt_probe(struct spi_device *spi) | ||
| 161 | { | ||
| 162 | struct device *dev = &spi->dev; | ||
| 163 | struct tinydrm_device *tdev; | ||
| 164 | struct mipi_dbi *mipi; | ||
| 165 | struct gpio_desc *dc; | ||
| 166 | u32 rotation = 0; | ||
| 167 | int ret; | ||
| 168 | |||
| 169 | mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL); | ||
| 170 | if (!mipi) | ||
| 171 | return -ENOMEM; | ||
| 172 | |||
| 173 | mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); | ||
| 174 | if (IS_ERR(mipi->reset)) { | ||
| 175 | dev_err(dev, "Failed to get gpio 'reset'\n"); | ||
| 176 | return PTR_ERR(mipi->reset); | ||
| 177 | } | ||
| 178 | |||
| 179 | dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW); | ||
| 180 | if (IS_ERR(dc)) { | ||
| 181 | dev_err(dev, "Failed to get gpio 'dc'\n"); | ||
| 182 | return PTR_ERR(dc); | ||
| 183 | } | ||
| 184 | |||
| 185 | mipi->regulator = devm_regulator_get(dev, "power"); | ||
| 186 | if (IS_ERR(mipi->regulator)) | ||
| 187 | return PTR_ERR(mipi->regulator); | ||
| 188 | |||
| 189 | mipi->backlight = tinydrm_of_find_backlight(dev); | ||
| 190 | if (IS_ERR(mipi->backlight)) | ||
| 191 | return PTR_ERR(mipi->backlight); | ||
| 192 | |||
| 193 | device_property_read_u32(dev, "rotation", &rotation); | ||
| 194 | |||
| 195 | ret = mipi_dbi_spi_init(spi, mipi, dc, &mi0283qt_pipe_funcs, | ||
| 196 | &mi0283qt_driver, &mi0283qt_mode, rotation); | ||
| 197 | if (ret) | ||
| 198 | return ret; | ||
| 199 | |||
| 200 | ret = mi0283qt_init(mipi); | ||
| 201 | if (ret) | ||
| 202 | return ret; | ||
| 203 | |||
| 204 | /* use devres to run fini after DRM unregister (drv->remove runs before it) */ | ||
| 205 | ret = devm_add_action(dev, mi0283qt_fini, mipi); | ||
| 206 | if (ret) { | ||
| 207 | mi0283qt_fini(mipi); | ||
| 208 | return ret; | ||
| 209 | } | ||
| 210 | |||
| 211 | tdev = &mipi->tinydrm; | ||
| 212 | |||
| 213 | ret = devm_tinydrm_register(tdev); | ||
| 214 | if (ret) | ||
| 215 | return ret; | ||
| 216 | |||
| 217 | spi_set_drvdata(spi, mipi); | ||
| 218 | |||
| 219 | DRM_DEBUG_DRIVER("Initialized %s:%s @%uMHz on minor %d\n", | ||
| 220 | tdev->drm->driver->name, dev_name(dev), | ||
| 221 | spi->max_speed_hz / 1000000, | ||
| 222 | tdev->drm->primary->index); | ||
| 223 | |||
| 224 | return 0; | ||
| 225 | } | ||
| 226 | |||
| 227 | static void mi0283qt_shutdown(struct spi_device *spi) | ||
| 228 | { | ||
| 229 | struct mipi_dbi *mipi = spi_get_drvdata(spi); | ||
| 230 | |||
| 231 | tinydrm_shutdown(&mipi->tinydrm); | ||
| 232 | } | ||
| 233 | |||
| 234 | static int __maybe_unused mi0283qt_pm_suspend(struct device *dev) | ||
| 235 | { | ||
| 236 | struct mipi_dbi *mipi = dev_get_drvdata(dev); | ||
| 237 | int ret; | ||
| 238 | |||
| 239 | ret = tinydrm_suspend(&mipi->tinydrm); | ||
| 240 | if (ret) | ||
| 241 | return ret; | ||
| 242 | |||
| 243 | mi0283qt_fini(mipi); | ||
| 244 | |||
| 245 | return 0; | ||
| 246 | } | ||
| 247 | |||
| 248 | static int __maybe_unused mi0283qt_pm_resume(struct device *dev) | ||
| 249 | { | ||
| 250 | struct mipi_dbi *mipi = dev_get_drvdata(dev); | ||
| 251 | int ret; | ||
| 252 | |||
| 253 | ret = mi0283qt_init(mipi); | ||
| 254 | if (ret) | ||
| 255 | return ret; | ||
| 256 | |||
| 257 | return tinydrm_resume(&mipi->tinydrm); | ||
| 258 | } | ||
| 259 | |||
| 260 | static const struct dev_pm_ops mi0283qt_pm_ops = { | ||
| 261 | SET_SYSTEM_SLEEP_PM_OPS(mi0283qt_pm_suspend, mi0283qt_pm_resume) | ||
| 262 | }; | ||
| 263 | |||
| 264 | static struct spi_driver mi0283qt_spi_driver = { | ||
| 265 | .driver = { | ||
| 266 | .name = "mi0283qt", | ||
| 267 | .owner = THIS_MODULE, | ||
| 268 | .of_match_table = mi0283qt_of_match, | ||
| 269 | .pm = &mi0283qt_pm_ops, | ||
| 270 | }, | ||
| 271 | .id_table = mi0283qt_id, | ||
| 272 | .probe = mi0283qt_probe, | ||
| 273 | .shutdown = mi0283qt_shutdown, | ||
| 274 | }; | ||
| 275 | module_spi_driver(mi0283qt_spi_driver); | ||
| 276 | |||
| 277 | MODULE_DESCRIPTION("Multi-Inno MI0283QT DRM driver"); | ||
| 278 | MODULE_AUTHOR("Noralf Trønnes"); | ||
| 279 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c new file mode 100644 index 000000000000..2d21b490005c --- /dev/null +++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c | |||
| @@ -0,0 +1,1005 @@ | |||
| 1 | /* | ||
| 2 | * MIPI Display Bus Interface (DBI) LCD controller support | ||
| 3 | * | ||
| 4 | * Copyright 2016 Noralf Trønnes | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <drm/tinydrm/mipi-dbi.h> | ||
| 13 | #include <drm/tinydrm/tinydrm-helpers.h> | ||
| 14 | #include <linux/debugfs.h> | ||
| 15 | #include <linux/dma-buf.h> | ||
| 16 | #include <linux/gpio/consumer.h> | ||
| 17 | #include <linux/module.h> | ||
| 18 | #include <linux/regulator/consumer.h> | ||
| 19 | #include <linux/spi/spi.h> | ||
| 20 | #include <video/mipi_display.h> | ||
| 21 | |||
| 22 | #define MIPI_DBI_MAX_SPI_READ_SPEED 2000000 /* 2MHz */ | ||
| 23 | |||
| 24 | #define DCS_POWER_MODE_DISPLAY BIT(2) | ||
| 25 | #define DCS_POWER_MODE_DISPLAY_NORMAL_MODE BIT(3) | ||
| 26 | #define DCS_POWER_MODE_SLEEP_MODE BIT(4) | ||
| 27 | #define DCS_POWER_MODE_PARTIAL_MODE BIT(5) | ||
| 28 | #define DCS_POWER_MODE_IDLE_MODE BIT(6) | ||
| 29 | #define DCS_POWER_MODE_RESERVED_MASK (BIT(0) | BIT(1) | BIT(7)) | ||
| 30 | |||
| 31 | /** | ||
| 32 | * DOC: overview | ||
| 33 | * | ||
| 34 | * This library provides helpers for MIPI Display Bus Interface (DBI) | ||
| 35 | * compatible display controllers. | ||
| 36 | * | ||
| 37 | * Many controllers for tiny LCD displays are MIPI-compliant and can use this | ||
| 38 | * library. If a controller uses registers 0x2A and 0x2B to set the area to | ||
| 39 | * update and uses register 0x2C to write to frame memory, it is most likely | ||
| 40 | * MIPI compliant. | ||
| 41 | * | ||
| 42 | * Only MIPI Type 1 displays are supported since a full frame memory is needed. | ||
| 43 | * | ||
| 44 | * There are 3 MIPI DBI implementation types: | ||
| 45 | * | ||
| 46 | * A. Motorola 6800 type parallel bus | ||
| 47 | * | ||
| 48 | * B. Intel 8080 type parallel bus | ||
| 49 | * | ||
| 50 | * C. SPI type with 3 options: | ||
| 51 | * | ||
| 52 | * 1. 9-bit with the Data/Command signal as the ninth bit | ||
| 53 | * 2. Same as above except it's sent as 16 bits | ||
| 54 | * 3. 8-bit with the Data/Command signal as a separate D/CX pin | ||
| 55 | * | ||
| 56 | * Currently mipi_dbi only supports Type C options 1 and 3 with | ||
| 57 | * mipi_dbi_spi_init(). | ||
| 58 | */ | ||
| 59 | |||
| 60 | #define MIPI_DBI_DEBUG_COMMAND(cmd, data, len) \ | ||
| 61 | ({ \ | ||
| 62 | if (!len) \ | ||
| 63 | DRM_DEBUG_DRIVER("cmd=%02x\n", cmd); \ | ||
| 64 | else if (len <= 32) \ | ||
| 65 | DRM_DEBUG_DRIVER("cmd=%02x, par=%*ph\n", cmd, (int)len, data);\ | ||
| 66 | else \ | ||
| 67 | DRM_DEBUG_DRIVER("cmd=%02x, len=%zu\n", cmd, len); \ | ||
| 68 | }) | ||
| 69 | |||
| 70 | static const u8 mipi_dbi_dcs_read_commands[] = { | ||
| 71 | MIPI_DCS_GET_DISPLAY_ID, | ||
| 72 | MIPI_DCS_GET_RED_CHANNEL, | ||
| 73 | MIPI_DCS_GET_GREEN_CHANNEL, | ||
| 74 | MIPI_DCS_GET_BLUE_CHANNEL, | ||
| 75 | MIPI_DCS_GET_DISPLAY_STATUS, | ||
| 76 | MIPI_DCS_GET_POWER_MODE, | ||
| 77 | MIPI_DCS_GET_ADDRESS_MODE, | ||
| 78 | MIPI_DCS_GET_PIXEL_FORMAT, | ||
| 79 | MIPI_DCS_GET_DISPLAY_MODE, | ||
| 80 | MIPI_DCS_GET_SIGNAL_MODE, | ||
| 81 | MIPI_DCS_GET_DIAGNOSTIC_RESULT, | ||
| 82 | MIPI_DCS_READ_MEMORY_START, | ||
| 83 | MIPI_DCS_READ_MEMORY_CONTINUE, | ||
| 84 | MIPI_DCS_GET_SCANLINE, | ||
| 85 | MIPI_DCS_GET_DISPLAY_BRIGHTNESS, | ||
| 86 | MIPI_DCS_GET_CONTROL_DISPLAY, | ||
| 87 | MIPI_DCS_GET_POWER_SAVE, | ||
| 88 | MIPI_DCS_GET_CABC_MIN_BRIGHTNESS, | ||
| 89 | MIPI_DCS_READ_DDB_START, | ||
| 90 | MIPI_DCS_READ_DDB_CONTINUE, | ||
| 91 | 0, /* sentinel */ | ||
| 92 | }; | ||
| 93 | |||
| 94 | static bool mipi_dbi_command_is_read(struct mipi_dbi *mipi, u8 cmd) | ||
| 95 | { | ||
| 96 | unsigned int i; | ||
| 97 | |||
| 98 | if (!mipi->read_commands) | ||
| 99 | return false; | ||
| 100 | |||
| 101 | for (i = 0; i < 0xff; i++) { | ||
| 102 | if (!mipi->read_commands[i]) | ||
| 103 | return false; | ||
| 104 | if (cmd == mipi->read_commands[i]) | ||
| 105 | return true; | ||
| 106 | } | ||
| 107 | |||
| 108 | return false; | ||
| 109 | } | ||
| 110 | |||
| 111 | /** | ||
| 112 | * mipi_dbi_command_read - MIPI DCS read command | ||
| 113 | * @mipi: MIPI structure | ||
| 114 | * @cmd: Command | ||
| 115 | * @val: Value read | ||
| 116 | * | ||
| 117 | * Send MIPI DCS read command to the controller. | ||
| 118 | * | ||
| 119 | * Returns: | ||
| 120 | * Zero on success, negative error code on failure. | ||
| 121 | */ | ||
| 122 | int mipi_dbi_command_read(struct mipi_dbi *mipi, u8 cmd, u8 *val) | ||
| 123 | { | ||
| 124 | if (!mipi->read_commands) | ||
| 125 | return -EACCES; | ||
| 126 | |||
| 127 | if (!mipi_dbi_command_is_read(mipi, cmd)) | ||
| 128 | return -EINVAL; | ||
| 129 | |||
| 130 | return mipi_dbi_command_buf(mipi, cmd, val, 1); | ||
| 131 | } | ||
| 132 | EXPORT_SYMBOL(mipi_dbi_command_read); | ||
| 133 | |||
| 134 | /** | ||
| 135 | * mipi_dbi_command_buf - MIPI DCS command with parameter(s) in an array | ||
| 136 | * @mipi: MIPI structure | ||
| 137 | * @cmd: Command | ||
| 138 | * @data: Parameter buffer | ||
| 139 | * @len: Buffer length | ||
| 140 | * | ||
| 141 | * Returns: | ||
| 142 | * Zero on success, negative error code on failure. | ||
| 143 | */ | ||
| 144 | int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len) | ||
| 145 | { | ||
| 146 | int ret; | ||
| 147 | |||
| 148 | mutex_lock(&mipi->cmdlock); | ||
| 149 | ret = mipi->command(mipi, cmd, data, len); | ||
| 150 | mutex_unlock(&mipi->cmdlock); | ||
| 151 | |||
| 152 | return ret; | ||
| 153 | } | ||
| 154 | EXPORT_SYMBOL(mipi_dbi_command_buf); | ||
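
Editor's note: callers with a fixed parameter list normally go through the mipi_dbi_command() variadic macro from the mipi-dbi.h header, which builds the array and calls this function. The two forms below are equivalent:

/* Inside some init function: these two calls do the same thing. */
mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);

u8 par[] = { 0x55 };
mipi_dbi_command_buf(mipi, MIPI_DCS_SET_PIXEL_FORMAT, par, sizeof(par));
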
| 155 | |||
| 156 | static int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb, | ||
| 157 | struct drm_clip_rect *clip, bool swap) | ||
| 158 | { | ||
| 159 | struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0); | ||
| 160 | struct dma_buf_attachment *import_attach = cma_obj->base.import_attach; | ||
| 161 | struct drm_format_name_buf format_name; | ||
| 162 | void *src = cma_obj->vaddr; | ||
| 163 | int ret = 0; | ||
| 164 | |||
| 165 | if (import_attach) { | ||
| 166 | ret = dma_buf_begin_cpu_access(import_attach->dmabuf, | ||
| 167 | DMA_FROM_DEVICE); | ||
| 168 | if (ret) | ||
| 169 | return ret; | ||
| 170 | } | ||
| 171 | |||
| 172 | switch (fb->format->format) { | ||
| 173 | case DRM_FORMAT_RGB565: | ||
| 174 | if (swap) | ||
| 175 | tinydrm_swab16(dst, src, fb, clip); | ||
| 176 | else | ||
| 177 | tinydrm_memcpy(dst, src, fb, clip); | ||
| 178 | break; | ||
| 179 | case DRM_FORMAT_XRGB8888: | ||
| 180 | tinydrm_xrgb8888_to_rgb565(dst, src, fb, clip, swap); | ||
| 181 | break; | ||
| 182 | default: | ||
| 183 | dev_err_once(fb->dev->dev, "Format is not supported: %s\n", | ||
| 184 | drm_get_format_name(fb->format->format, | ||
| 185 | &format_name)); | ||
| 186 | return -EINVAL; | ||
| 187 | } | ||
| 188 | |||
| 189 | if (import_attach) | ||
| 190 | ret = dma_buf_end_cpu_access(import_attach->dmabuf, | ||
| 191 | DMA_FROM_DEVICE); | ||
| 192 | return ret; | ||
| 193 | } | ||
| 194 | |||
| 195 | static int mipi_dbi_fb_dirty(struct drm_framebuffer *fb, | ||
| 196 | struct drm_file *file_priv, | ||
| 197 | unsigned int flags, unsigned int color, | ||
| 198 | struct drm_clip_rect *clips, | ||
| 199 | unsigned int num_clips) | ||
| 200 | { | ||
| 201 | struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0); | ||
| 202 | struct tinydrm_device *tdev = fb->dev->dev_private; | ||
| 203 | struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); | ||
| 204 | bool swap = mipi->swap_bytes; | ||
| 205 | struct drm_clip_rect clip; | ||
| 206 | int ret = 0; | ||
| 207 | bool full; | ||
| 208 | void *tr; | ||
| 209 | |||
| 210 | mutex_lock(&tdev->dirty_lock); | ||
| 211 | |||
| 212 | if (!mipi->enabled) | ||
| 213 | goto out_unlock; | ||
| 214 | |||
| 215 | /* fbdev can flush even when we're not interested */ | ||
| 216 | if (tdev->pipe.plane.fb != fb) | ||
| 217 | goto out_unlock; | ||
| 218 | |||
| 219 | full = tinydrm_merge_clips(&clip, clips, num_clips, flags, | ||
| 220 | fb->width, fb->height); | ||
| 221 | |||
| 222 | DRM_DEBUG("Flushing [FB:%d] x1=%u, x2=%u, y1=%u, y2=%u\n", fb->base.id, | ||
| 223 | clip.x1, clip.x2, clip.y1, clip.y2); | ||
| 224 | |||
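	/* Editor's comment: copy through tx_buf when a repack is needed: Type C1
	 * (no D/C gpio), partial update, byte swapping, or XRGB8888 emulation;
	 * otherwise the CMA buffer is transferred directly.
	 */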
| 225 | if (!mipi->dc || !full || swap || | ||
| 226 | fb->format->format == DRM_FORMAT_XRGB8888) { | ||
| 227 | tr = mipi->tx_buf; | ||
| 228 | ret = mipi_dbi_buf_copy(mipi->tx_buf, fb, &clip, swap); | ||
| 229 | if (ret) | ||
| 230 | goto out_unlock; | ||
| 231 | } else { | ||
| 232 | tr = cma_obj->vaddr; | ||
| 233 | } | ||
| 234 | |||
| 235 | mipi_dbi_command(mipi, MIPI_DCS_SET_COLUMN_ADDRESS, | ||
| 236 | (clip.x1 >> 8) & 0xFF, clip.x1 & 0xFF, | ||
| 237 | ((clip.x2 - 1) >> 8) & 0xFF, (clip.x2 - 1) & 0xFF); | ||
| 238 | mipi_dbi_command(mipi, MIPI_DCS_SET_PAGE_ADDRESS, | ||
| 239 | (clip.y1 >> 8) & 0xFF, clip.y1 & 0xFF, | ||
| 240 | ((clip.y2 - 1) >> 8) & 0xFF, (clip.y2 - 1) & 0xFF); | ||
| 241 | |||
| 242 | ret = mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START, tr, | ||
| 243 | (clip.x2 - clip.x1) * (clip.y2 - clip.y1) * 2); | ||
| 244 | |||
| 245 | out_unlock: | ||
| 246 | mutex_unlock(&tdev->dirty_lock); | ||
| 247 | |||
| 248 | if (ret) | ||
| 249 | dev_err_once(fb->dev->dev, "Failed to update display %d\n", | ||
| 250 | ret); | ||
| 251 | |||
| 252 | return ret; | ||
| 253 | } | ||
| 254 | |||
| 255 | static const struct drm_framebuffer_funcs mipi_dbi_fb_funcs = { | ||
| 256 | .destroy = drm_fb_cma_destroy, | ||
| 257 | .create_handle = drm_fb_cma_create_handle, | ||
| 258 | .dirty = mipi_dbi_fb_dirty, | ||
| 259 | }; | ||
| 260 | |||
| 261 | /** | ||
| 262 | * mipi_dbi_pipe_enable - MIPI DBI pipe enable helper | ||
| 263 | * @pipe: Display pipe | ||
| 264 | * @crtc_state: CRTC state | ||
| 265 | * | ||
| 266 | * This function enables backlight. Drivers can use this as their | ||
| 267 | * &drm_simple_display_pipe_funcs->enable callback. | ||
| 268 | */ | ||
| 269 | void mipi_dbi_pipe_enable(struct drm_simple_display_pipe *pipe, | ||
| 270 | struct drm_crtc_state *crtc_state) | ||
| 271 | { | ||
| 272 | struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); | ||
| 273 | struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); | ||
| 274 | struct drm_framebuffer *fb = pipe->plane.fb; | ||
| 275 | |||
| 276 | DRM_DEBUG_KMS("\n"); | ||
| 277 | |||
| 278 | mipi->enabled = true; | ||
| 279 | if (fb) | ||
| 280 | fb->funcs->dirty(fb, NULL, 0, 0, NULL, 0); | ||
| 281 | |||
| 282 | tinydrm_enable_backlight(mipi->backlight); | ||
| 283 | } | ||
| 284 | EXPORT_SYMBOL(mipi_dbi_pipe_enable); | ||
| 285 | |||
| 286 | static void mipi_dbi_blank(struct mipi_dbi *mipi) | ||
| 287 | { | ||
| 288 | struct drm_device *drm = mipi->tinydrm.drm; | ||
| 289 | u16 height = drm->mode_config.min_height; | ||
| 290 | u16 width = drm->mode_config.min_width; | ||
| 291 | size_t len = width * height * 2; | ||
| 292 | |||
| 293 | memset(mipi->tx_buf, 0, len); | ||
| 294 | |||
| 295 | mipi_dbi_command(mipi, MIPI_DCS_SET_COLUMN_ADDRESS, 0, 0, | ||
| 296 | ((width - 1) >> 8) & 0xFF, (width - 1) & 0xFF); | ||
| 297 | mipi_dbi_command(mipi, MIPI_DCS_SET_PAGE_ADDRESS, 0, 0, | ||
| 298 | ((height - 1) >> 8) & 0xFF, (height - 1) & 0xFF); | ||
| 299 | mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START, | ||
| 300 | (u8 *)mipi->tx_buf, len); | ||
| 301 | } | ||
| 302 | |||
| 303 | /** | ||
| 304 | * mipi_dbi_pipe_disable - MIPI DBI pipe disable helper | ||
| 305 | * @pipe: Display pipe | ||
| 306 | * | ||
| 307 | * This function disables the backlight if present, otherwise it blanks | ||
| 308 | * the display memory. Drivers can use this as their | ||
| 309 | * &drm_simple_display_pipe_funcs->disable callback. | ||
| 310 | */ | ||
| 311 | void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe) | ||
| 312 | { | ||
| 313 | struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); | ||
| 314 | struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); | ||
| 315 | |||
| 316 | DRM_DEBUG_KMS("\n"); | ||
| 317 | |||
| 318 | mipi->enabled = false; | ||
| 319 | |||
| 320 | if (mipi->backlight) | ||
| 321 | tinydrm_disable_backlight(mipi->backlight); | ||
| 322 | else | ||
| 323 | mipi_dbi_blank(mipi); | ||
| 324 | } | ||
| 325 | EXPORT_SYMBOL(mipi_dbi_pipe_disable); | ||
| 326 | |||
| 327 | static const uint32_t mipi_dbi_formats[] = { | ||
| 328 | DRM_FORMAT_RGB565, | ||
| 329 | DRM_FORMAT_XRGB8888, | ||
| 330 | }; | ||
| 331 | |||
| 332 | /** | ||
| 333 | * mipi_dbi_init - MIPI DBI initialization | ||
| 334 | * @dev: Parent device | ||
| 335 | * @mipi: &mipi_dbi structure to initialize | ||
| 336 | * @pipe_funcs: Display pipe functions | ||
| 337 | * @driver: DRM driver | ||
| 338 | * @mode: Display mode | ||
| 339 | * @rotation: Initial rotation in degrees counterclockwise | ||
| 340 | * | ||
| 341 | * This function initializes a &mipi_dbi structure and its underlying | ||
| 342 | * &tinydrm_device. It also sets up the display pipeline. | ||
| 343 | * | ||
| 344 | * Supported formats: Native RGB565 and emulated XRGB8888. | ||
| 345 | * | ||
| 346 | * Objects created by this function will be automatically freed on driver | ||
| 347 | * detach (devres). | ||
| 348 | * | ||
| 349 | * Returns: | ||
| 350 | * Zero on success, negative error code on failure. | ||
| 351 | */ | ||
| 352 | int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi, | ||
| 353 | const struct drm_simple_display_pipe_funcs *pipe_funcs, | ||
| 354 | struct drm_driver *driver, | ||
| 355 | const struct drm_display_mode *mode, unsigned int rotation) | ||
| 356 | { | ||
| 357 | size_t bufsize = mode->vdisplay * mode->hdisplay * sizeof(u16); | ||
| 358 | struct tinydrm_device *tdev = &mipi->tinydrm; | ||
| 359 | int ret; | ||
| 360 | |||
| 361 | if (!mipi->command) | ||
| 362 | return -EINVAL; | ||
| 363 | |||
| 364 | mutex_init(&mipi->cmdlock); | ||
| 365 | |||
| 366 | mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL); | ||
| 367 | if (!mipi->tx_buf) | ||
| 368 | return -ENOMEM; | ||
| 369 | |||
| 370 | ret = devm_tinydrm_init(dev, tdev, &mipi_dbi_fb_funcs, driver); | ||
| 371 | if (ret) | ||
| 372 | return ret; | ||
| 373 | |||
| 374 | /* TODO: Maybe add DRM_MODE_CONNECTOR_SPI */ | ||
| 375 | ret = tinydrm_display_pipe_init(tdev, pipe_funcs, | ||
| 376 | DRM_MODE_CONNECTOR_VIRTUAL, | ||
| 377 | mipi_dbi_formats, | ||
| 378 | ARRAY_SIZE(mipi_dbi_formats), mode, | ||
| 379 | rotation); | ||
| 380 | if (ret) | ||
| 381 | return ret; | ||
| 382 | |||
| 383 | tdev->drm->mode_config.preferred_depth = 16; | ||
| 384 | mipi->rotation = rotation; | ||
| 385 | |||
| 386 | drm_mode_config_reset(tdev->drm); | ||
| 387 | |||
| 388 | DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n", | ||
| 389 | tdev->drm->mode_config.preferred_depth, rotation); | ||
| 390 | |||
| 391 | return 0; | ||
| 392 | } | ||
| 393 | EXPORT_SYMBOL(mipi_dbi_init); | ||
| 394 | |||
| 395 | /** | ||
| 396 | * mipi_dbi_hw_reset - Hardware reset of controller | ||
| 397 | * @mipi: MIPI DBI structure | ||
| 398 | * | ||
| 399 | * Reset controller if the &mipi_dbi->reset gpio is set. | ||
| 400 | */ | ||
| 401 | void mipi_dbi_hw_reset(struct mipi_dbi *mipi) | ||
| 402 | { | ||
| 403 | if (!mipi->reset) | ||
| 404 | return; | ||
| 405 | |||
| 406 | gpiod_set_value_cansleep(mipi->reset, 0); | ||
| 407 | msleep(20); | ||
| 408 | gpiod_set_value_cansleep(mipi->reset, 1); | ||
| 409 | msleep(120); | ||
| 410 | } | ||
| 411 | EXPORT_SYMBOL(mipi_dbi_hw_reset); | ||
| 412 | |||
| 413 | /** | ||
| 414 | * mipi_dbi_display_is_on - Check if display is on | ||
| 415 | * @mipi: MIPI DBI structure | ||
| 416 | * | ||
| 417 | * This function checks the Power Mode register (if readable) to see if | ||
| 418 | * display output is turned on. This can be used to see if the bootloader | ||
| 419 | * has already turned on the display, avoiding flicker when the pipeline is | ||
| 420 | * enabled. | ||
| 421 | * | ||
| 422 | * Returns: | ||
| 423 | * true if the display can be verified to be on, false otherwise. | ||
| 424 | */ | ||
| 425 | bool mipi_dbi_display_is_on(struct mipi_dbi *mipi) | ||
| 426 | { | ||
| 427 | u8 val; | ||
| 428 | |||
| 429 | if (mipi_dbi_command_read(mipi, MIPI_DCS_GET_POWER_MODE, &val)) | ||
| 430 | return false; | ||
| 431 | |||
| 432 | val &= ~DCS_POWER_MODE_RESERVED_MASK; | ||
| 433 | |||
| 434 | if (val != (DCS_POWER_MODE_DISPLAY | | ||
| 435 | DCS_POWER_MODE_DISPLAY_NORMAL_MODE | DCS_POWER_MODE_SLEEP_MODE)) | ||
| 436 | return false; | ||
| 437 | |||
| 438 | DRM_DEBUG_DRIVER("Display is ON\n"); | ||
| 439 | |||
| 440 | return true; | ||
| 441 | } | ||
| 442 | EXPORT_SYMBOL(mipi_dbi_display_is_on); | ||
| 443 | |||
| 444 | #if IS_ENABLED(CONFIG_SPI) | ||
| 445 | |||
| 446 | /* | ||
| 447 | * Many controllers have a max speed of 10MHz, but can be pushed way beyond | ||
| 448 | * that. Increase reliability by running pixel data at max speed and the rest | ||
| 449 | * at 10MHz, preventing transfer glitches from messing up the init settings. | ||
| 450 | */ | ||
| 451 | static u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len) | ||
| 452 | { | ||
| 453 | if (len > 64) | ||
| 454 | return 0; /* use default */ | ||
| 455 | |||
| 456 | return min_t(u32, 10000000, spi->max_speed_hz); | ||
| 457 | } | ||
| 458 | |||
| 459 | /* | ||
| 460 | * MIPI DBI Type C Option 1 | ||
| 461 | * | ||
| 462 | * If the SPI controller doesn't have 9 bits per word support, | ||
| 463 | * use blocks of 9 bytes to send 8x 9-bit words using an 8-bit SPI transfer. | ||
| 464 | * Pad partial blocks with MIPI_DCS_NOP (zero). | ||
| 465 | * This is how the D/C bit (x) is added: | ||
| 466 | * x7654321 | ||
| 467 | * 0x765432 | ||
| 468 | * 10x76543 | ||
| 469 | * 210x7654 | ||
| 470 | * 3210x765 | ||
| 471 | * 43210x76 | ||
| 472 | * 543210x7 | ||
| 473 | * 6543210x | ||
| 474 | * 76543210 | ||
| 475 | */ | ||
| 476 | |||
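Editor's note: to make the table above concrete, here is the wire image for a single data byte, worked out by hand (an illustrative value):

/*
 * One data byte 0xA5 with D/C = 1 is the 9-bit word 1'1010'0101. Padded
 * to a full block (no-op words at the end), the nine wire bytes are:
 *   0xD2 0x80 0x00 0x00 0x00 0x00 0x00 0x00 0x00
 * D/C sits in bit 7 of byte 0; the leftover LSB lands in bit 7 of byte 1.
 */
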
| 477 | static int mipi_dbi_spi1e_transfer(struct mipi_dbi *mipi, int dc, | ||
| 478 | const void *buf, size_t len, | ||
| 479 | unsigned int bpw) | ||
| 480 | { | ||
| 481 | bool swap_bytes = (bpw == 16 && tinydrm_machine_little_endian()); | ||
| 482 | size_t chunk, max_chunk = mipi->tx_buf9_len; | ||
| 483 | struct spi_device *spi = mipi->spi; | ||
| 484 | struct spi_transfer tr = { | ||
| 485 | .tx_buf = mipi->tx_buf9, | ||
| 486 | .bits_per_word = 8, | ||
| 487 | }; | ||
| 488 | struct spi_message m; | ||
| 489 | const u8 *src = buf; | ||
| 490 | int i, ret; | ||
| 491 | u8 *dst; | ||
| 492 | |||
| 493 | if (drm_debug & DRM_UT_DRIVER) | ||
| 494 | pr_debug("[drm:%s] dc=%d, max_chunk=%zu, transfers:\n", | ||
| 495 | __func__, dc, max_chunk); | ||
| 496 | |||
| 497 | tr.speed_hz = mipi_dbi_spi_cmd_max_speed(spi, len); | ||
| 498 | spi_message_init_with_transfers(&m, &tr, 1); | ||
| 499 | |||
| 500 | if (!dc) { | ||
| 501 | if (WARN_ON_ONCE(len != 1)) | ||
| 502 | return -EINVAL; | ||
| 503 | |||
| 504 | /* Command: pad no-op's (zeroes) at beginning of block */ | ||
| 505 | dst = mipi->tx_buf9; | ||
| 506 | memset(dst, 0, 9); | ||
| 507 | dst[8] = *src; | ||
| 508 | tr.len = 9; | ||
| 509 | |||
| 510 | tinydrm_dbg_spi_message(spi, &m); | ||
| 511 | |||
| 512 | return spi_sync(spi, &m); | ||
| 513 | } | ||
| 514 | |||
| 515 | /* max with room for adding one bit per byte */ | ||
| 516 | max_chunk = max_chunk / 9 * 8; | ||
| 517 | /* but no bigger than len */ | ||
| 518 | max_chunk = min(max_chunk, len); | ||
| 519 | /* 8 byte blocks */ | ||
| 520 | max_chunk = max_t(size_t, 8, max_chunk & ~0x7); | ||
| 521 | |||
| 522 | while (len) { | ||
| 523 | size_t added = 0; | ||
| 524 | |||
| 525 | chunk = min(len, max_chunk); | ||
| 526 | len -= chunk; | ||
| 527 | dst = mipi->tx_buf9; | ||
| 528 | |||
| 529 | if (chunk < 8) { | ||
| 530 | u8 val, carry = 0; | ||
| 531 | |||
| 532 | /* Data: pad no-op's (zeroes) at end of block */ | ||
| 533 | memset(dst, 0, 9); | ||
| 534 | |||
| 535 | if (swap_bytes) { | ||
| 536 | for (i = 1; i < (chunk + 1); i++) { | ||
| 537 | val = src[1]; | ||
| 538 | *dst++ = carry | BIT(8 - i) | (val >> i); | ||
| 539 | carry = val << (8 - i); | ||
| 540 | i++; | ||
| 541 | val = src[0]; | ||
| 542 | *dst++ = carry | BIT(8 - i) | (val >> i); | ||
| 543 | carry = val << (8 - i); | ||
| 544 | src += 2; | ||
| 545 | } | ||
| 546 | *dst++ = carry; | ||
| 547 | } else { | ||
| 548 | for (i = 1; i < (chunk + 1); i++) { | ||
| 549 | val = *src++; | ||
| 550 | *dst++ = carry | BIT(8 - i) | (val >> i); | ||
| 551 | carry = val << (8 - i); | ||
| 552 | } | ||
| 553 | *dst++ = carry; | ||
| 554 | } | ||
| 555 | |||
| 556 | chunk = 8; | ||
| 557 | added = 1; | ||
| 558 | } else { | ||
| 559 | for (i = 0; i < chunk; i += 8) { | ||
| 560 | if (swap_bytes) { | ||
| 561 | *dst++ = BIT(7) | (src[1] >> 1); | ||
| 562 | *dst++ = (src[1] << 7) | BIT(6) | (src[0] >> 2); | ||
| 563 | *dst++ = (src[0] << 6) | BIT(5) | (src[3] >> 3); | ||
| 564 | *dst++ = (src[3] << 5) | BIT(4) | (src[2] >> 4); | ||
| 565 | *dst++ = (src[2] << 4) | BIT(3) | (src[5] >> 5); | ||
| 566 | *dst++ = (src[5] << 3) | BIT(2) | (src[4] >> 6); | ||
| 567 | *dst++ = (src[4] << 2) | BIT(1) | (src[7] >> 7); | ||
| 568 | *dst++ = (src[7] << 1) | BIT(0); | ||
| 569 | *dst++ = src[6]; | ||
| 570 | } else { | ||
| 571 | *dst++ = BIT(7) | (src[0] >> 1); | ||
| 572 | *dst++ = (src[0] << 7) | BIT(6) | (src[1] >> 2); | ||
| 573 | *dst++ = (src[1] << 6) | BIT(5) | (src[2] >> 3); | ||
| 574 | *dst++ = (src[2] << 5) | BIT(4) | (src[3] >> 4); | ||
| 575 | *dst++ = (src[3] << 4) | BIT(3) | (src[4] >> 5); | ||
| 576 | *dst++ = (src[4] << 3) | BIT(2) | (src[5] >> 6); | ||
| 577 | *dst++ = (src[5] << 2) | BIT(1) | (src[6] >> 7); | ||
| 578 | *dst++ = (src[6] << 1) | BIT(0); | ||
| 579 | *dst++ = src[7]; | ||
| 580 | } | ||
| 581 | |||
| 582 | src += 8; | ||
| 583 | added++; | ||
| 584 | } | ||
| 585 | } | ||
| 586 | |||
| 587 | tr.len = chunk + added; | ||
| 588 | |||
| 589 | tinydrm_dbg_spi_message(spi, &m); | ||
| 590 | ret = spi_sync(spi, &m); | ||
| 591 | if (ret) | ||
| 592 | return ret; | ||
| 593 | } | ||
| 594 | |||
| 595 | return 0; | ||
| 596 | } | ||
| 597 | |||
| 598 | static int mipi_dbi_spi1_transfer(struct mipi_dbi *mipi, int dc, | ||
| 599 | const void *buf, size_t len, | ||
| 600 | unsigned int bpw) | ||
| 601 | { | ||
| 602 | struct spi_device *spi = mipi->spi; | ||
| 603 | struct spi_transfer tr = { | ||
| 604 | .bits_per_word = 9, | ||
| 605 | }; | ||
| 606 | const u16 *src16 = buf; | ||
| 607 | const u8 *src8 = buf; | ||
| 608 | struct spi_message m; | ||
| 609 | size_t max_chunk; | ||
| 610 | u16 *dst16; | ||
| 611 | int ret; | ||
| 612 | |||
| 613 | if (!tinydrm_spi_bpw_supported(spi, 9)) | ||
| 614 | return mipi_dbi_spi1e_transfer(mipi, dc, buf, len, bpw); | ||
| 615 | |||
| 616 | tr.speed_hz = mipi_dbi_spi_cmd_max_speed(spi, len); | ||
| 617 | max_chunk = mipi->tx_buf9_len; | ||
| 618 | dst16 = mipi->tx_buf9; | ||
| 619 | |||
| 620 | if (drm_debug & DRM_UT_DRIVER) | ||
| 621 | pr_debug("[drm:%s] dc=%d, max_chunk=%zu, transfers:\n", | ||
| 622 | __func__, dc, max_chunk); | ||
| 623 | |||
| 624 | max_chunk = min(max_chunk / 2, len); | ||
| 625 | |||
| 626 | spi_message_init_with_transfers(&m, &tr, 1); | ||
| 627 | tr.tx_buf = dst16; | ||
| 628 | |||
| 629 | while (len) { | ||
| 630 | size_t chunk = min(len, max_chunk); | ||
| 631 | unsigned int i; | ||
| 632 | |||
| 633 | if (bpw == 16 && tinydrm_machine_little_endian()) { | ||
| 634 | for (i = 0; i < (chunk * 2); i += 2) { | ||
| 635 | dst16[i] = *src16 >> 8; | ||
| 636 | dst16[i + 1] = *src16++ & 0xFF; | ||
| 637 | if (dc) { | ||
| 638 | dst16[i] |= 0x0100; | ||
| 639 | dst16[i + 1] |= 0x0100; | ||
| 640 | } | ||
| 641 | } | ||
| 642 | } else { | ||
| 643 | for (i = 0; i < chunk; i++) { | ||
| 644 | dst16[i] = *src8++; | ||
| 645 | if (dc) | ||
| 646 | dst16[i] |= 0x0100; | ||
| 647 | } | ||
| 648 | } | ||
| 649 | |||
| 650 | tr.len = chunk; | ||
| 651 | len -= chunk; | ||
| 652 | |||
| 653 | tinydrm_dbg_spi_message(spi, &m); | ||
| 654 | ret = spi_sync(spi, &m); | ||
| 655 | if (ret) | ||
| 656 | return ret; | ||
| 657 | } | ||
| 658 | |||
| 659 | return 0; | ||
| 660 | } | ||
| 661 | |||
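On little-endian machines with a 9 bits-per-word capable master, the 16-bpw branch above splits each pixel into two 9-bit words, high byte first, with bit 8 carrying D/C. A sketch of that expansion (hypothetical helper, not a kernel API):

```c
#include <stdint.h>

/* Sketch only: one 16-bit pixel becomes two 9-bit data words. */
static void pixel_to_9bit_words(uint16_t pixel, uint16_t out[2], int dc)
{
	uint16_t flag = dc ? 0x100 : 0;	/* D/C bit of each 9-bit word */

	out[0] = flag | (pixel >> 8);	/* high byte goes out first */
	out[1] = flag | (pixel & 0xff);
}
```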
| 662 | static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 cmd, | ||
| 663 | u8 *parameters, size_t num) | ||
| 664 | { | ||
| 665 | unsigned int bpw = (cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8; | ||
| 666 | int ret; | ||
| 667 | |||
| 668 | if (mipi_dbi_command_is_read(mipi, cmd)) | ||
| 669 | return -ENOTSUPP; | ||
| 670 | |||
| 671 | MIPI_DBI_DEBUG_COMMAND(cmd, parameters, num); | ||
| 672 | |||
| 673 | ret = mipi_dbi_spi1_transfer(mipi, 0, &cmd, 1, 8); | ||
| 674 | if (ret || !num) | ||
| 675 | return ret; | ||
| 676 | |||
| 677 | return mipi_dbi_spi1_transfer(mipi, 1, parameters, num, bpw); | ||
| 678 | } | ||
| 679 | |||
| 680 | /* MIPI DBI Type C Option 3 */ | ||
| 681 | |||
| 682 | static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd, | ||
| 683 | u8 *data, size_t len) | ||
| 684 | { | ||
| 685 | struct spi_device *spi = mipi->spi; | ||
| 686 | u32 speed_hz = min_t(u32, MIPI_DBI_MAX_SPI_READ_SPEED, | ||
| 687 | spi->max_speed_hz / 2); | ||
| 688 | struct spi_transfer tr[2] = { | ||
| 689 | { | ||
| 690 | .speed_hz = speed_hz, | ||
| 691 | .tx_buf = &cmd, | ||
| 692 | .len = 1, | ||
| 693 | }, { | ||
| 694 | .speed_hz = speed_hz, | ||
| 695 | .len = len, | ||
| 696 | }, | ||
| 697 | }; | ||
| 698 | struct spi_message m; | ||
| 699 | u8 *buf; | ||
| 700 | int ret; | ||
| 701 | |||
| 702 | if (!len) | ||
| 703 | return -EINVAL; | ||
| 704 | |||
| 705 | /* | ||
| 706 | * Support non-standard 24-bit and 32-bit Nokia read commands which | ||
| 707 | * start with a dummy clock, so we need to read an extra byte. | ||
| 708 | */ | ||
| 709 | if (cmd == MIPI_DCS_GET_DISPLAY_ID || | ||
| 710 | cmd == MIPI_DCS_GET_DISPLAY_STATUS) { | ||
| 711 | if (!(len == 3 || len == 4)) | ||
| 712 | return -EINVAL; | ||
| 713 | |||
| 714 | tr[1].len = len + 1; | ||
| 715 | } | ||
| 716 | |||
| 717 | buf = kmalloc(tr[1].len, GFP_KERNEL); | ||
| 718 | if (!buf) | ||
| 719 | return -ENOMEM; | ||
| 720 | |||
| 721 | tr[1].rx_buf = buf; | ||
| 722 | gpiod_set_value_cansleep(mipi->dc, 0); | ||
| 723 | |||
| 724 | spi_message_init_with_transfers(&m, tr, ARRAY_SIZE(tr)); | ||
| 725 | ret = spi_sync(spi, &m); | ||
| 726 | if (ret) | ||
| 727 | goto err_free; | ||
| 728 | |||
| 729 | tinydrm_dbg_spi_message(spi, &m); | ||
| 730 | |||
| 731 | if (tr[1].len == len) { | ||
| 732 | memcpy(data, buf, len); | ||
| 733 | } else { | ||
| 734 | unsigned int i; | ||
| 735 | |||
| 736 | for (i = 0; i < len; i++) | ||
| 737 | data[i] = (buf[i] << 1) | !!(buf[i + 1] & BIT(7)); | ||
| 738 | } | ||
| 739 | |||
| 740 | MIPI_DBI_DEBUG_COMMAND(cmd, data, len); | ||
| 741 | |||
| 742 | err_free: | ||
| 743 | kfree(buf); | ||
| 744 | |||
| 745 | return ret; | ||
| 746 | } | ||
| 747 | |||
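The reconstruction loop above undoes the Nokia dummy clock: because the panel emits one extra clock before the payload, every payload bit arrives shifted right by one, so the code reads len + 1 bytes and shifts each byte left, pulling in the top bit of its successor. A minimal sketch of the realignment (hypothetical helper name):

```c
#include <stddef.h>
#include <stdint.h>

/* Sketch only: realign a read payload that trails one dummy clock. */
static void realign_dummy_clock(const uint8_t *raw, uint8_t *out,
				size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)	/* raw must hold len + 1 bytes */
		out[i] = (uint8_t)((raw[i] << 1) | (raw[i + 1] >> 7));
}
```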
| 748 | static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd, | ||
| 749 | u8 *par, size_t num) | ||
| 750 | { | ||
| 751 | struct spi_device *spi = mipi->spi; | ||
| 752 | unsigned int bpw = 8; | ||
| 753 | u32 speed_hz; | ||
| 754 | int ret; | ||
| 755 | |||
| 756 | if (mipi_dbi_command_is_read(mipi, cmd)) | ||
| 757 | return mipi_dbi_typec3_command_read(mipi, cmd, par, num); | ||
| 758 | |||
| 759 | MIPI_DBI_DEBUG_COMMAND(cmd, par, num); | ||
| 760 | |||
| 761 | gpiod_set_value_cansleep(mipi->dc, 0); | ||
| 762 | speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1); | ||
| 763 | ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1); | ||
| 764 | if (ret || !num) | ||
| 765 | return ret; | ||
| 766 | |||
| 767 | if (cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes) | ||
| 768 | bpw = 16; | ||
| 769 | |||
| 770 | gpiod_set_value_cansleep(mipi->dc, 1); | ||
| 771 | speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num); | ||
| 772 | |||
| 773 | return tinydrm_spi_transfer(spi, speed_hz, NULL, bpw, par, num); | ||
| 774 | } | ||
| 775 | |||
| 776 | /** | ||
| 777 | * mipi_dbi_spi_init - Initialize MIPI DBI SPI interfaced controller | ||
| 778 | * @spi: SPI device | ||
| 779 | * @mipi: &mipi_dbi structure to initialize | ||
| 780 | * @dc: D/C gpio (optional) | ||
| 781 | * @pipe_funcs: Display pipe functions | ||
| 782 | * @driver: DRM driver | ||
| 783 | * @mode: Display mode | ||
| 784 | * @rotation: Initial rotation in degrees counter clockwise | ||
| 785 | * | ||
| 786 | * This function sets &mipi_dbi->command, enables &mipi_dbi->read_commands for the | ||
| 787 | * usual read commands and initializes @mipi using mipi_dbi_init(). | ||
| 788 | * | ||
| 789 | * If @dc is set, a Type C Option 3 interface is assumed; otherwise | ||
| 790 | * Type C Option 1 is used. | ||
| 791 | * | ||
| 792 | * If the SPI master driver doesn't support the necessary bits per word, | ||
| 793 | * the following transformation is used: | ||
| 794 | * | ||
| 795 | * - 9-bit: pack eight 9-bit words into nine 8-bit bytes, padded with no-op commands. | ||
| 796 | * - 16-bit: send as 8-bit if big endian; swap bytes if little endian. | ||
| 797 | * | ||
| 798 | * Returns: | ||
| 799 | * Zero on success, negative error code on failure. | ||
| 800 | */ | ||
| 801 | int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *mipi, | ||
| 802 | struct gpio_desc *dc, | ||
| 803 | const struct drm_simple_display_pipe_funcs *pipe_funcs, | ||
| 804 | struct drm_driver *driver, | ||
| 805 | const struct drm_display_mode *mode, | ||
| 806 | unsigned int rotation) | ||
| 807 | { | ||
| 808 | size_t tx_size = tinydrm_spi_max_transfer_size(spi, 0); | ||
| 809 | struct device *dev = &spi->dev; | ||
| 810 | int ret; | ||
| 811 | |||
| 812 | if (tx_size < 16) { | ||
| 813 | DRM_ERROR("SPI transmit buffer too small: %zu\n", tx_size); | ||
| 814 | return -EINVAL; | ||
| 815 | } | ||
| 816 | |||
| 817 | /* | ||
| 818 | * Even though it's not the SPI device that does DMA (the master does), | ||
| 819 | * the dma mask is necessary for the dma_alloc_wc() in | ||
| 820 | * drm_gem_cma_create(). The dma_addr returned will be a physical | ||
| 821 | address which might be different from the bus address, but this is | ||
| 822 | * not a problem since the address will not be used. | ||
| 823 | * The virtual address is used in the transfer and the SPI core | ||
| 824 | * re-maps it on the SPI master device using the DMA streaming API | ||
| 825 | * (spi_map_buf()). | ||
| 826 | */ | ||
| 827 | if (!dev->coherent_dma_mask) { | ||
| 828 | ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); | ||
| 829 | if (ret) { | ||
| 830 | dev_warn(dev, "Failed to set dma mask %d\n", ret); | ||
| 831 | return ret; | ||
| 832 | } | ||
| 833 | } | ||
| 834 | |||
| 835 | mipi->spi = spi; | ||
| 836 | mipi->read_commands = mipi_dbi_dcs_read_commands; | ||
| 837 | |||
| 838 | if (dc) { | ||
| 839 | mipi->command = mipi_dbi_typec3_command; | ||
| 840 | mipi->dc = dc; | ||
| 841 | if (tinydrm_machine_little_endian() && | ||
| 842 | !tinydrm_spi_bpw_supported(spi, 16)) | ||
| 843 | mipi->swap_bytes = true; | ||
| 844 | } else { | ||
| 845 | mipi->command = mipi_dbi_typec1_command; | ||
| 846 | mipi->tx_buf9_len = tx_size; | ||
| 847 | mipi->tx_buf9 = devm_kmalloc(dev, tx_size, GFP_KERNEL); | ||
| 848 | if (!mipi->tx_buf9) | ||
| 849 | return -ENOMEM; | ||
| 850 | } | ||
| 851 | |||
| 852 | return mipi_dbi_init(dev, mipi, pipe_funcs, driver, mode, rotation); | ||
| 853 | } | ||
| 854 | EXPORT_SYMBOL(mipi_dbi_spi_init); | ||
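A hedged usage sketch, modeled on the tinydrm panel drivers of this period: the SPI probe resolves the optional D/C gpio and hands everything to mipi_dbi_spi_init(), which then picks Type C Option 1 or Option 3. mydrv_pipe_funcs, mydrv_driver and mydrv_mode are illustrative placeholders, not symbols from this patch:

```c
#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <drm/tinydrm/mipi-dbi.h>

/* Illustrative placeholders, defined elsewhere in a real driver. */
extern const struct drm_simple_display_pipe_funcs mydrv_pipe_funcs;
extern struct drm_driver mydrv_driver;
extern const struct drm_display_mode mydrv_mode;

static int mydrv_probe(struct spi_device *spi)
{
	struct mipi_dbi *mipi;
	struct gpio_desc *dc;

	mipi = devm_kzalloc(&spi->dev, sizeof(*mipi), GFP_KERNEL);
	if (!mipi)
		return -ENOMEM;

	/* D/C gpio is optional: without it Type C Option 1 is used */
	dc = devm_gpiod_get_optional(&spi->dev, "dc", GPIOD_OUT_LOW);
	if (IS_ERR(dc))
		return PTR_ERR(dc);

	return mipi_dbi_spi_init(spi, mipi, dc, &mydrv_pipe_funcs,
				 &mydrv_driver, &mydrv_mode, 0);
}
```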
| 855 | |||
| 856 | #endif /* CONFIG_SPI */ | ||
| 857 | |||
| 858 | #ifdef CONFIG_DEBUG_FS | ||
| 859 | |||
| 860 | static ssize_t mipi_dbi_debugfs_command_write(struct file *file, | ||
| 861 | const char __user *ubuf, | ||
| 862 | size_t count, loff_t *ppos) | ||
| 863 | { | ||
| 864 | struct seq_file *m = file->private_data; | ||
| 865 | struct mipi_dbi *mipi = m->private; | ||
| 866 | u8 val, cmd = 0, parameters[64]; | ||
| 867 | char *buf, *pos, *token; | ||
| 868 | unsigned int i; | ||
| 869 | int ret; | ||
| 870 | |||
| 871 | buf = memdup_user_nul(ubuf, count); | ||
| 872 | if (IS_ERR(buf)) | ||
| 873 | return PTR_ERR(buf); | ||
| 874 | |||
| 875 | /* strip trailing whitespace */ | ||
| 876 | for (i = count - 1; i > 0; i--) | ||
| 877 | if (isspace(buf[i])) | ||
| 878 | buf[i] = '\0'; | ||
| 879 | else | ||
| 880 | break; | ||
| 881 | i = 0; | ||
| 882 | pos = buf; | ||
| 883 | while (pos) { | ||
| 884 | token = strsep(&pos, " "); | ||
| 885 | if (!token) { | ||
| 886 | ret = -EINVAL; | ||
| 887 | goto err_free; | ||
| 888 | } | ||
| 889 | |||
| 890 | ret = kstrtou8(token, 16, &val); | ||
| 891 | if (ret < 0) | ||
| 892 | goto err_free; | ||
| 893 | |||
| 894 | if (token == buf) { | ||
| 895 | cmd = val; | ||
| 896 | } else { | ||
| 897 | if (i == ARRAY_SIZE(parameters)) { | ||
| 898 | ret = -E2BIG; | ||
| 899 | goto err_free; | ||
| 900 | } | ||
| 901 | parameters[i++] = val; | ||
| 902 | } | ||
| 903 | } | ||
| 904 | |||
| 905 | ret = mipi_dbi_command_buf(mipi, cmd, parameters, i); | ||
| 906 | |||
| 907 | err_free: | ||
| 908 | kfree(buf); | ||
| 909 | |||
| 910 | return ret < 0 ? ret : count; | ||
| 911 | } | ||
| 912 | |||
| 913 | static int mipi_dbi_debugfs_command_show(struct seq_file *m, void *unused) | ||
| 914 | { | ||
| 915 | struct mipi_dbi *mipi = m->private; | ||
| 916 | u8 cmd, val[4]; | ||
| 917 | size_t len, i; | ||
| 918 | int ret; | ||
| 919 | |||
| 920 | for (cmd = 0; cmd < 255; cmd++) { | ||
| 921 | if (!mipi_dbi_command_is_read(mipi, cmd)) | ||
| 922 | continue; | ||
| 923 | |||
| 924 | switch (cmd) { | ||
| 925 | case MIPI_DCS_READ_MEMORY_START: | ||
| 926 | case MIPI_DCS_READ_MEMORY_CONTINUE: | ||
| 927 | len = 2; | ||
| 928 | break; | ||
| 929 | case MIPI_DCS_GET_DISPLAY_ID: | ||
| 930 | len = 3; | ||
| 931 | break; | ||
| 932 | case MIPI_DCS_GET_DISPLAY_STATUS: | ||
| 933 | len = 4; | ||
| 934 | break; | ||
| 935 | default: | ||
| 936 | len = 1; | ||
| 937 | break; | ||
| 938 | } | ||
| 939 | |||
| 940 | seq_printf(m, "%02x: ", cmd); | ||
| 941 | ret = mipi_dbi_command_buf(mipi, cmd, val, len); | ||
| 942 | if (ret) { | ||
| 943 | seq_puts(m, "XX\n"); | ||
| 944 | continue; | ||
| 945 | } | ||
| 946 | |||
| 947 | for (i = 0; i < len; i++) | ||
| 948 | seq_printf(m, "%02x", val[i]); | ||
| 949 | seq_puts(m, "\n"); | ||
| 950 | } | ||
| 951 | |||
| 952 | return 0; | ||
| 953 | } | ||
| 954 | |||
| 955 | static int mipi_dbi_debugfs_command_open(struct inode *inode, | ||
| 956 | struct file *file) | ||
| 957 | { | ||
| 958 | return single_open(file, mipi_dbi_debugfs_command_show, | ||
| 959 | inode->i_private); | ||
| 960 | } | ||
| 961 | |||
| 962 | static const struct file_operations mipi_dbi_debugfs_command_fops = { | ||
| 963 | .owner = THIS_MODULE, | ||
| 964 | .open = mipi_dbi_debugfs_command_open, | ||
| 965 | .read = seq_read, | ||
| 966 | .llseek = seq_lseek, | ||
| 967 | .release = single_release, | ||
| 968 | .write = mipi_dbi_debugfs_command_write, | ||
| 969 | }; | ||
| 970 | |||
| 971 | static const struct drm_info_list mipi_dbi_debugfs_list[] = { | ||
| 972 | { "fb", drm_fb_cma_debugfs_show, 0 }, | ||
| 973 | }; | ||
| 974 | |||
| 975 | /** | ||
| 976 | * mipi_dbi_debugfs_init - Create debugfs entries | ||
| 977 | * @minor: DRM minor | ||
| 978 | * | ||
| 979 | * This function creates a 'command' debugfs file for sending commands to the | ||
| 980 | * controller or getting the read command values. | ||
| 981 | * Drivers can use this as their &drm_driver->debugfs_init callback. | ||
| 982 | * | ||
| 983 | * Returns: | ||
| 984 | * Zero on success, negative error code on failure. | ||
| 985 | */ | ||
| 986 | int mipi_dbi_debugfs_init(struct drm_minor *minor) | ||
| 987 | { | ||
| 988 | struct tinydrm_device *tdev = minor->dev->dev_private; | ||
| 989 | struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); | ||
| 990 | umode_t mode = S_IFREG | S_IWUSR; | ||
| 991 | |||
| 992 | if (mipi->read_commands) | ||
| 993 | mode |= S_IRUGO; | ||
| 994 | debugfs_create_file("command", mode, minor->debugfs_root, mipi, | ||
| 995 | &mipi_dbi_debugfs_command_fops); | ||
| 996 | |||
| 997 | return drm_debugfs_create_files(mipi_dbi_debugfs_list, | ||
| 998 | ARRAY_SIZE(mipi_dbi_debugfs_list), | ||
| 999 | minor->debugfs_root, minor); | ||
| 1000 | } | ||
| 1001 | EXPORT_SYMBOL(mipi_dbi_debugfs_init); | ||
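A hypothetical userspace sketch of driving the 'command' file created above. The write handler parses space-separated hex bytes, the command first and then up to 64 parameters; the dri/0 minor in the path is an assumption:

```c
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* path depends on the DRM minor; 0 assumed here */
	const char *path = "/sys/kernel/debug/dri/0/command";
	/* 0x2a = MIPI_DCS_SET_COLUMN_ADDRESS, columns 0..31 */
	const char *cmd = "2a 00 00 00 1f\n";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}
```

Reading the same file (when the controller supports read commands) dumps each read command's returned bytes, as implemented by mipi_dbi_debugfs_command_show() above.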
| 1002 | |||
| 1003 | #endif | ||
| 1004 | |||
| 1005 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 68ef993ab431..88169141bef5 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c | |||
| @@ -66,8 +66,11 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, | |||
| 66 | if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) | 66 | if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) |
| 67 | goto out_unlock; | 67 | goto out_unlock; |
| 68 | 68 | ||
| 69 | ttm_bo_reference(bo); | ||
| 69 | up_read(&vma->vm_mm->mmap_sem); | 70 | up_read(&vma->vm_mm->mmap_sem); |
| 70 | (void) dma_fence_wait(bo->moving, true); | 71 | (void) dma_fence_wait(bo->moving, true); |
| 72 | ttm_bo_unreserve(bo); | ||
| 73 | ttm_bo_unref(&bo); | ||
| 71 | goto out_unlock; | 74 | goto out_unlock; |
| 72 | } | 75 | } |
| 73 | 76 | ||
| @@ -120,8 +123,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
| 120 | 123 | ||
| 121 | if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) { | 124 | if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) { |
| 122 | if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { | 125 | if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { |
| 126 | ttm_bo_reference(bo); | ||
| 123 | up_read(&vma->vm_mm->mmap_sem); | 127 | up_read(&vma->vm_mm->mmap_sem); |
| 124 | (void) ttm_bo_wait_unreserved(bo); | 128 | (void) ttm_bo_wait_unreserved(bo); |
| 129 | ttm_bo_unref(&bo); | ||
| 125 | } | 130 | } |
| 126 | 131 | ||
| 127 | return VM_FAULT_RETRY; | 132 | return VM_FAULT_RETRY; |
| @@ -166,6 +171,13 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
| 166 | ret = ttm_bo_vm_fault_idle(bo, vma, vmf); | 171 | ret = ttm_bo_vm_fault_idle(bo, vma, vmf); |
| 167 | if (unlikely(ret != 0)) { | 172 | if (unlikely(ret != 0)) { |
| 168 | retval = ret; | 173 | retval = ret; |
| 174 | |||
| 175 | if (retval == VM_FAULT_RETRY && | ||
| 176 | !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { | ||
| 177 | /* The BO has already been unreserved. */ | ||
| 178 | return retval; | ||
| 179 | } | ||
| 180 | |||
| 169 | goto out_unlock; | 181 | goto out_unlock; |
| 170 | } | 182 | } |
| 171 | 183 | ||
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 764320156cce..fd6f8d9669ce 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c | |||
| @@ -841,7 +841,7 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc, | |||
| 841 | 841 | ||
| 842 | } | 842 | } |
| 843 | 843 | ||
| 844 | __drm_atomic_helper_crtc_destroy_state(state); | 844 | drm_atomic_helper_crtc_destroy_state(crtc, state); |
| 845 | } | 845 | } |
| 846 | 846 | ||
| 847 | static const struct drm_crtc_funcs vc4_crtc_funcs = { | 847 | static const struct drm_crtc_funcs vc4_crtc_funcs = { |
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index db920771bfb5..ab3016982466 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c | |||
| @@ -594,12 +594,14 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec) | |||
| 594 | args->shader_rec_count); | 594 | args->shader_rec_count); |
| 595 | struct vc4_bo *bo; | 595 | struct vc4_bo *bo; |
| 596 | 596 | ||
| 597 | if (uniforms_offset < shader_rec_offset || | 597 | if (shader_rec_offset < args->bin_cl_size || |
| 598 | uniforms_offset < shader_rec_offset || | ||
| 598 | exec_size < uniforms_offset || | 599 | exec_size < uniforms_offset || |
| 599 | args->shader_rec_count >= (UINT_MAX / | 600 | args->shader_rec_count >= (UINT_MAX / |
| 600 | sizeof(struct vc4_shader_state)) || | 601 | sizeof(struct vc4_shader_state)) || |
| 601 | temp_size < exec_size) { | 602 | temp_size < exec_size) { |
| 602 | DRM_ERROR("overflow in exec arguments\n"); | 603 | DRM_ERROR("overflow in exec arguments\n"); |
| 604 | ret = -EINVAL; | ||
| 603 | goto fail; | 605 | goto fail; |
| 604 | } | 606 | } |
| 605 | 607 | ||
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index c1f06897136b..f7a229df572d 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c | |||
| @@ -858,7 +858,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, | |||
| 858 | } | 858 | } |
| 859 | } | 859 | } |
| 860 | plane = &vc4_plane->base; | 860 | plane = &vc4_plane->base; |
| 861 | ret = drm_universal_plane_init(dev, plane, 0xff, | 861 | ret = drm_universal_plane_init(dev, plane, 0, |
| 862 | &vc4_plane_funcs, | 862 | &vc4_plane_funcs, |
| 863 | formats, num_formats, | 863 | formats, num_formats, |
| 864 | type, NULL); | 864 | type, NULL); |
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c index 08886a309757..5cdd003605f5 100644 --- a/drivers/gpu/drm/vc4/vc4_render_cl.c +++ b/drivers/gpu/drm/vc4/vc4_render_cl.c | |||
| @@ -461,7 +461,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec, | |||
| 461 | } | 461 | } |
| 462 | 462 | ||
| 463 | ret = vc4_full_res_bounds_check(exec, *obj, surf); | 463 | ret = vc4_full_res_bounds_check(exec, *obj, surf); |
| 464 | if (!ret) | 464 | if (ret) |
| 465 | return ret; | 465 | return ret; |
| 466 | 466 | ||
| 467 | return 0; | 467 | return 0; |
