Diffstat (limited to 'drivers/gpu/drm/radeon/cik.c')
-rw-r--r--   drivers/gpu/drm/radeon/cik.c | 377
1 file changed, 290 insertions(+), 87 deletions(-)
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index e950fabd7f5e..bbb17841a9e5 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -1697,7 +1697,7 @@ static void cik_srbm_select(struct radeon_device *rdev,
  * Load the GDDR MC ucode into the hw (CIK).
  * Returns 0 on success, error on failure.
  */
-static int ci_mc_load_microcode(struct radeon_device *rdev)
+int ci_mc_load_microcode(struct radeon_device *rdev)
 {
         const __be32 *fw_data;
         u32 running, blackout = 0;
@@ -3046,7 +3046,7 @@ static u32 cik_create_bitmask(u32 bit_width)
 }
 
 /**
- * cik_select_se_sh - select which SE, SH to address
+ * cik_get_rb_disabled - computes the mask of disabled RBs
  *
  * @rdev: radeon_device pointer
  * @max_rb_num: max RBs (render backends) for the asic
@@ -3487,6 +3487,51 @@ int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
 }
 
 /**
+ * cik_hdp_flush_cp_ring_emit - emit an hdp flush on the cp
+ *
+ * @rdev: radeon_device pointer
+ * @ridx: radeon ring index
+ *
+ * Emits an hdp flush on the cp.
+ */
+static void cik_hdp_flush_cp_ring_emit(struct radeon_device *rdev,
+                                       int ridx)
+{
+        struct radeon_ring *ring = &rdev->ring[ridx];
+        u32 ref_and_mask;
+
+        switch (ring->idx) {
+        case CAYMAN_RING_TYPE_CP1_INDEX:
+        case CAYMAN_RING_TYPE_CP2_INDEX:
+        default:
+                switch (ring->me) {
+                case 0:
+                        ref_and_mask = CP2 << ring->pipe;
+                        break;
+                case 1:
+                        ref_and_mask = CP6 << ring->pipe;
+                        break;
+                default:
+                        return;
+                }
+                break;
+        case RADEON_RING_TYPE_GFX_INDEX:
+                ref_and_mask = CP0;
+                break;
+        }
+
+        radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+        radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
+                                 WAIT_REG_MEM_FUNCTION(3) |  /* == */
+                                 WAIT_REG_MEM_ENGINE(1)));   /* pfp */
+        radeon_ring_write(ring, GPU_HDP_FLUSH_REQ >> 2);
+        radeon_ring_write(ring, GPU_HDP_FLUSH_DONE >> 2);
+        radeon_ring_write(ring, ref_and_mask);
+        radeon_ring_write(ring, ref_and_mask);
+        radeon_ring_write(ring, 0x20); /* poll interval */
+}
+
+/**
  * cik_fence_gfx_ring_emit - emit a fence on the gfx ring
  *
  * @rdev: radeon_device pointer
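Note: the WAIT_REG_MEM special op added here replaces the old WRITE_DATA-based flush used further down. As a rough, hedged illustration only (not driver code), the handshake the packet asks the CP to perform is approximately the following, assuming GPU_HDP_FLUSH_REQ/GPU_HDP_FLUSH_DONE and the usual register helpers from cikd.h:

        /* Illustrative sketch: write the client mask to the request register,
         * which kicks off the HDP flush, then poll the done register until
         * the same bits are set; 0x20 is the poll interval in the packet. */
        WREG32(GPU_HDP_FLUSH_REQ, ref_and_mask);
        while ((RREG32(GPU_HDP_FLUSH_DONE) & ref_and_mask) != ref_and_mask)
                cpu_relax();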
@@ -3512,15 +3557,7 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
         radeon_ring_write(ring, fence->seq);
         radeon_ring_write(ring, 0);
         /* HDP flush */
-        /* We should be using the new WAIT_REG_MEM special op packet here
-         * but it causes the CP to hang
-         */
-        radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-        radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-                                 WRITE_DATA_DST_SEL(0)));
-        radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
-        radeon_ring_write(ring, 0);
-        radeon_ring_write(ring, 0);
+        cik_hdp_flush_cp_ring_emit(rdev, fence->ring);
 }
 
 /**
@@ -3550,15 +3587,7 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
         radeon_ring_write(ring, fence->seq);
         radeon_ring_write(ring, 0);
         /* HDP flush */
-        /* We should be using the new WAIT_REG_MEM special op packet here
-         * but it causes the CP to hang
-         */
-        radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-        radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-                                 WRITE_DATA_DST_SEL(0)));
-        radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
-        radeon_ring_write(ring, 0);
-        radeon_ring_write(ring, 0);
+        cik_hdp_flush_cp_ring_emit(rdev, fence->ring);
 }
 
 bool cik_semaphore_ring_emit(struct radeon_device *rdev,
@@ -3566,8 +3595,6 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
                              struct radeon_semaphore *semaphore,
                              bool emit_wait)
 {
-        /* TODO: figure out why semaphore cause lockups */
-#if 0
         uint64_t addr = semaphore->gpu_addr;
         unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
 
@@ -3576,9 +3603,6 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
         radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
 
         return true;
-#else
-        return false;
-#endif
 }
 
 /**
@@ -3816,6 +3840,8 @@ static void cik_cp_gfx_enable(struct radeon_device *rdev, bool enable)
         if (enable)
                 WREG32(CP_ME_CNTL, 0);
         else {
+                if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+                        radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
                 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
                 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
         }
@@ -4014,18 +4040,50 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev)
                 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
                 return r;
         }
+
+        if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+                radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
         return 0;
 }
 
-u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
-                              struct radeon_ring *ring)
+u32 cik_gfx_get_rptr(struct radeon_device *rdev,
+                     struct radeon_ring *ring)
 {
         u32 rptr;
 
+        if (rdev->wb.enabled)
+                rptr = rdev->wb.wb[ring->rptr_offs/4];
+        else
+                rptr = RREG32(CP_RB0_RPTR);
+
+        return rptr;
+}
+
+u32 cik_gfx_get_wptr(struct radeon_device *rdev,
+                     struct radeon_ring *ring)
+{
+        u32 wptr;
+
+        wptr = RREG32(CP_RB0_WPTR);
+
+        return wptr;
+}
+
+void cik_gfx_set_wptr(struct radeon_device *rdev,
+                      struct radeon_ring *ring)
+{
+        WREG32(CP_RB0_WPTR, ring->wptr);
+        (void)RREG32(CP_RB0_WPTR);
+}
 
+u32 cik_compute_get_rptr(struct radeon_device *rdev,
+                         struct radeon_ring *ring)
+{
+        u32 rptr;
 
         if (rdev->wb.enabled) {
-                rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
+                rptr = rdev->wb.wb[ring->rptr_offs/4];
         } else {
                 mutex_lock(&rdev->srbm_mutex);
                 cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
@@ -4037,13 +4095,14 @@ u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
         return rptr;
 }
 
-u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
-                              struct radeon_ring *ring)
+u32 cik_compute_get_wptr(struct radeon_device *rdev,
+                         struct radeon_ring *ring)
 {
         u32 wptr;
 
         if (rdev->wb.enabled) {
-                wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]);
+                /* XXX check if swapping is necessary on BE */
+                wptr = rdev->wb.wb[ring->wptr_offs/4];
         } else {
                 mutex_lock(&rdev->srbm_mutex);
                 cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
@@ -4055,10 +4114,11 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
         return wptr;
 }
 
-void cik_compute_ring_set_wptr(struct radeon_device *rdev,
-                               struct radeon_ring *ring)
+void cik_compute_set_wptr(struct radeon_device *rdev,
+                          struct radeon_ring *ring)
 {
-        rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr);
+        /* XXX check if swapping is necessary on BE */
+        rdev->wb.wb[ring->wptr_offs/4] = ring->wptr;
         WDOORBELL32(ring->doorbell_index, ring->wptr);
 }
 
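Note: these gfx/compute rptr/wptr helpers are plain accessors so the core ring code no longer needs to know per-ring registers. They are presumably consumed through the per-ring callback table (struct radeon_asic_ring) in radeon_asic.c; a hypothetical sketch of such an entry, with only the fields relevant here shown:

        static struct radeon_asic_ring ci_gfx_ring = {
                /* ... ib_execute, emit_fence, ring_test, etc. ... */
                .get_rptr = &cik_gfx_get_rptr,
                .get_wptr = &cik_gfx_get_wptr,
                .set_wptr = &cik_gfx_set_wptr,
        };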
@@ -4074,8 +4134,11 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
 {
         if (enable)
                 WREG32(CP_MEC_CNTL, 0);
-        else
+        else {
                 WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
+                rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+                rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+        }
         udelay(50);
 }
 
@@ -4852,6 +4915,160 @@ static void cik_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
         cik_print_gpu_status_regs(rdev);
 }
 
+struct kv_reset_save_regs {
+        u32 gmcon_reng_execute;
+        u32 gmcon_misc;
+        u32 gmcon_misc3;
+};
+
+static void kv_save_regs_for_reset(struct radeon_device *rdev,
+                                   struct kv_reset_save_regs *save)
+{
+        save->gmcon_reng_execute = RREG32(GMCON_RENG_EXECUTE);
+        save->gmcon_misc = RREG32(GMCON_MISC);
+        save->gmcon_misc3 = RREG32(GMCON_MISC3);
+
+        WREG32(GMCON_RENG_EXECUTE, save->gmcon_reng_execute & ~RENG_EXECUTE_ON_PWR_UP);
+        WREG32(GMCON_MISC, save->gmcon_misc & ~(RENG_EXECUTE_ON_REG_UPDATE |
+                                                STCTRL_STUTTER_EN));
+}
+
+static void kv_restore_regs_for_reset(struct radeon_device *rdev,
+                                      struct kv_reset_save_regs *save)
+{
+        int i;
+
+        WREG32(GMCON_PGFSM_WRITE, 0);
+        WREG32(GMCON_PGFSM_CONFIG, 0x200010ff);
+
+        for (i = 0; i < 5; i++)
+                WREG32(GMCON_PGFSM_WRITE, 0);
+
+        WREG32(GMCON_PGFSM_WRITE, 0);
+        WREG32(GMCON_PGFSM_CONFIG, 0x300010ff);
+
+        for (i = 0; i < 5; i++)
+                WREG32(GMCON_PGFSM_WRITE, 0);
+
+        WREG32(GMCON_PGFSM_WRITE, 0x210000);
+        WREG32(GMCON_PGFSM_CONFIG, 0xa00010ff);
+
+        for (i = 0; i < 5; i++)
+                WREG32(GMCON_PGFSM_WRITE, 0);
+
+        WREG32(GMCON_PGFSM_WRITE, 0x21003);
+        WREG32(GMCON_PGFSM_CONFIG, 0xb00010ff);
+
+        for (i = 0; i < 5; i++)
+                WREG32(GMCON_PGFSM_WRITE, 0);
+
+        WREG32(GMCON_PGFSM_WRITE, 0x2b00);
+        WREG32(GMCON_PGFSM_CONFIG, 0xc00010ff);
+
+        for (i = 0; i < 5; i++)
+                WREG32(GMCON_PGFSM_WRITE, 0);
+
+        WREG32(GMCON_PGFSM_WRITE, 0);
+        WREG32(GMCON_PGFSM_CONFIG, 0xd00010ff);
+
+        for (i = 0; i < 5; i++)
+                WREG32(GMCON_PGFSM_WRITE, 0);
+
+        WREG32(GMCON_PGFSM_WRITE, 0x420000);
+        WREG32(GMCON_PGFSM_CONFIG, 0x100010ff);
+
+        for (i = 0; i < 5; i++)
+                WREG32(GMCON_PGFSM_WRITE, 0);
+
+        WREG32(GMCON_PGFSM_WRITE, 0x120202);
+        WREG32(GMCON_PGFSM_CONFIG, 0x500010ff);
+
+        for (i = 0; i < 5; i++)
+                WREG32(GMCON_PGFSM_WRITE, 0);
+
+        WREG32(GMCON_PGFSM_WRITE, 0x3e3e36);
+        WREG32(GMCON_PGFSM_CONFIG, 0x600010ff);
+
+        for (i = 0; i < 5; i++)
+                WREG32(GMCON_PGFSM_WRITE, 0);
+
+        WREG32(GMCON_PGFSM_WRITE, 0x373f3e);
+        WREG32(GMCON_PGFSM_CONFIG, 0x700010ff);
+
+        for (i = 0; i < 5; i++)
+                WREG32(GMCON_PGFSM_WRITE, 0);
+
+        WREG32(GMCON_PGFSM_WRITE, 0x3e1332);
+        WREG32(GMCON_PGFSM_CONFIG, 0xe00010ff);
+
+        WREG32(GMCON_MISC3, save->gmcon_misc3);
+        WREG32(GMCON_MISC, save->gmcon_misc);
+        WREG32(GMCON_RENG_EXECUTE, save->gmcon_reng_execute);
+}
+
+static void cik_gpu_pci_config_reset(struct radeon_device *rdev)
+{
+        struct evergreen_mc_save save;
+        struct kv_reset_save_regs kv_save = { 0 };
+        u32 tmp, i;
+
+        dev_info(rdev->dev, "GPU pci config reset\n");
+
+        /* disable dpm? */
+
+        /* disable cg/pg */
+        cik_fini_pg(rdev);
+        cik_fini_cg(rdev);
+
+        /* Disable GFX parsing/prefetching */
+        WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
+
+        /* Disable MEC parsing/prefetching */
+        WREG32(CP_MEC_CNTL, MEC_ME1_HALT | MEC_ME2_HALT);
+
+        /* sdma0 */
+        tmp = RREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET);
+        tmp |= SDMA_HALT;
+        WREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+        /* sdma1 */
+        tmp = RREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET);
+        tmp |= SDMA_HALT;
+        WREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+        /* XXX other engines? */
+
+        /* halt the rlc, disable cp internal ints */
+        cik_rlc_stop(rdev);
+
+        udelay(50);
+
+        /* disable mem access */
+        evergreen_mc_stop(rdev, &save);
+        if (evergreen_mc_wait_for_idle(rdev)) {
+                dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
+        }
+
+        if (rdev->flags & RADEON_IS_IGP)
+                kv_save_regs_for_reset(rdev, &kv_save);
+
+        /* disable BM */
+        pci_clear_master(rdev->pdev);
+        /* reset */
+        radeon_pci_config_reset(rdev);
+
+        udelay(100);
+
+        /* wait for asic to come out of reset */
+        for (i = 0; i < rdev->usec_timeout; i++) {
+                if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
+                        break;
+                udelay(1);
+        }
+
+        /* does asic init need to be run first??? */
+        if (rdev->flags & RADEON_IS_IGP)
+                kv_restore_regs_for_reset(rdev, &kv_save);
+}
+
 /**
  * cik_asic_reset - soft reset GPU
  *
@@ -4870,10 +5087,17 @@ int cik_asic_reset(struct radeon_device *rdev)
         if (reset_mask)
                 r600_set_bios_scratch_engine_hung(rdev, true);
 
+        /* try soft reset */
         cik_gpu_soft_reset(rdev, reset_mask);
 
         reset_mask = cik_gpu_check_soft_reset(rdev);
 
+        /* try pci config reset */
+        if (reset_mask && radeon_hard_reset)
+                cik_gpu_pci_config_reset(rdev);
+
+        reset_mask = cik_gpu_check_soft_reset(rdev);
+
         if (!reset_mask)
                 r600_set_bios_scratch_engine_hung(rdev, false);
 
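Note: radeon_hard_reset is presumably a new module parameter added alongside this change, so the more drastic PCI config reset only runs as a fallback when the user opts in and the soft reset still reports hung blocks. Assuming the parameter is exposed as hard_reset, it would be enabled with something like:

        modprobe radeon hard_reset=1    # or radeon.hard_reset=1 on the kernel command line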
@@ -5138,20 +5362,6 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
                WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
                WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
 
-        /* TC cache setup ??? */
-        WREG32(TC_CFG_L1_LOAD_POLICY0, 0);
-        WREG32(TC_CFG_L1_LOAD_POLICY1, 0);
-        WREG32(TC_CFG_L1_STORE_POLICY, 0);
-
-        WREG32(TC_CFG_L2_LOAD_POLICY0, 0);
-        WREG32(TC_CFG_L2_LOAD_POLICY1, 0);
-        WREG32(TC_CFG_L2_STORE_POLICY0, 0);
-        WREG32(TC_CFG_L2_STORE_POLICY1, 0);
-        WREG32(TC_CFG_L2_ATOMIC_POLICY, 0);
-
-        WREG32(TC_CFG_L1_VOLATILE, 0);
-        WREG32(TC_CFG_L2_VOLATILE, 0);
-
         if (rdev->family == CHIP_KAVERI) {
                 u32 tmp = RREG32(CHUB_CONTROL);
                 tmp &= ~BYPASS_VM;
@@ -5367,16 +5577,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
         radeon_ring_write(ring, VMID(0));
 
         /* HDP flush */
-        /* We should be using the WAIT_REG_MEM packet here like in
-         * cik_fence_ring_emit(), but it causes the CP to hang in this
-         * context...
-         */
-        radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-        radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-                                 WRITE_DATA_DST_SEL(0)));
-        radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
-        radeon_ring_write(ring, 0);
-        radeon_ring_write(ring, 0);
+        cik_hdp_flush_cp_ring_emit(rdev, ridx);
 
         /* bits 0-15 are the VM contexts0-15 */
         radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -7503,26 +7704,7 @@ static int cik_startup(struct radeon_device *rdev)
 
         cik_mc_program(rdev);
 
-        if (rdev->flags & RADEON_IS_IGP) {
-                if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
-                    !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
-                        r = cik_init_microcode(rdev);
-                        if (r) {
-                                DRM_ERROR("Failed to load firmware!\n");
-                                return r;
-                        }
-                }
-        } else {
-                if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
-                    !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
-                    !rdev->mc_fw) {
-                        r = cik_init_microcode(rdev);
-                        if (r) {
-                                DRM_ERROR("Failed to load firmware!\n");
-                                return r;
-                        }
-                }
-
+        if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) {
                 r = ci_mc_load_microcode(rdev);
                 if (r) {
                         DRM_ERROR("Failed to load MC firmware!\n");
@@ -7627,7 +7809,6 @@ static int cik_startup(struct radeon_device *rdev)
 
         ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
         r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
-                             CP_RB0_RPTR, CP_RB0_WPTR,
                              PACKET3(PACKET3_NOP, 0x3FFF));
         if (r)
                 return r;
@@ -7636,7 +7817,6 @@ static int cik_startup(struct radeon_device *rdev)
         /* type-2 packets are deprecated on MEC, use type-3 instead */
         ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
         r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
-                             CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
                              PACKET3(PACKET3_NOP, 0x3FFF));
         if (r)
                 return r;
@@ -7648,7 +7828,6 @@ static int cik_startup(struct radeon_device *rdev)
         /* type-2 packets are deprecated on MEC, use type-3 instead */
         ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
         r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
-                             CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
                              PACKET3(PACKET3_NOP, 0x3FFF));
         if (r)
                 return r;
@@ -7660,16 +7839,12 @@ static int cik_startup(struct radeon_device *rdev)
 
         ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
         r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
-                             SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET,
-                             SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET,
                              SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
         if (r)
                 return r;
 
         ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
         r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
-                             SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET,
-                             SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET,
                              SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
         if (r)
                 return r;
@@ -7685,7 +7860,6 @@ static int cik_startup(struct radeon_device *rdev)
         ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
         if (ring->ring_size) {
                 r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
-                                     UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
                                      RADEON_CP_PACKET2);
                 if (!r)
                         r = uvd_v1_0_init(rdev);
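Note: every radeon_ring_init() call above drops the rptr/wptr register arguments; register access now goes through the get_rptr/get_wptr/set_wptr callbacks, so the core only needs the writeback offset and the nop packet. The resulting prototype is presumably reduced to something like:

        int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring,
                             unsigned ring_size, unsigned rptr_offs, u32 nop);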
@@ -7731,6 +7905,9 @@ int cik_resume(struct radeon_device *rdev)
         /* init golden registers */
         cik_init_golden_registers(rdev);
 
+        if (rdev->pm.pm_method == PM_METHOD_DPM)
+                radeon_pm_resume(rdev);
+
         rdev->accel_working = true;
         r = cik_startup(rdev);
         if (r) {
@@ -7754,6 +7931,7 @@ int cik_resume(struct radeon_device *rdev)
  */
 int cik_suspend(struct radeon_device *rdev)
 {
+        radeon_pm_suspend(rdev);
         dce6_audio_fini(rdev);
         radeon_vm_manager_fini(rdev);
         cik_cp_enable(rdev, false);
@@ -7835,6 +8013,30 @@ int cik_init(struct radeon_device *rdev)
         if (r)
                 return r;
 
+        if (rdev->flags & RADEON_IS_IGP) {
+                if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+                    !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
+                        r = cik_init_microcode(rdev);
+                        if (r) {
+                                DRM_ERROR("Failed to load firmware!\n");
+                                return r;
+                        }
+                }
+        } else {
+                if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+                    !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
+                    !rdev->mc_fw) {
+                        r = cik_init_microcode(rdev);
+                        if (r) {
+                                DRM_ERROR("Failed to load firmware!\n");
+                                return r;
+                        }
+                }
+        }
+
+        /* Initialize power management */
+        radeon_pm_init(rdev);
+
         ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
         ring->ring_obj = NULL;
         r600_ring_init(rdev, ring, 1024 * 1024);
@@ -7915,6 +8117,7 @@ int cik_init(struct radeon_device *rdev)
  */
 void cik_fini(struct radeon_device *rdev)
 {
+        radeon_pm_fini(rdev);
         cik_cp_fini(rdev);
         cik_sdma_fini(rdev);
         cik_fini_pg(rdev);