37 files changed, 729 insertions, 170 deletions
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index dfac7965ea28..32923d2f6002 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -707,8 +707,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
707 | switch (connector->connector_type) { | 707 | switch (connector->connector_type) { |
708 | case DRM_MODE_CONNECTOR_DVII: | 708 | case DRM_MODE_CONNECTOR_DVII: |
709 | case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ | 709 | case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ |
710 | if (drm_detect_hdmi_monitor(radeon_connector->edid) && | 710 | if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) || |
711 | radeon_audio) | 711 | (drm_detect_hdmi_monitor(radeon_connector->edid) && |
712 | (radeon_connector->audio == RADEON_AUDIO_AUTO))) | ||
712 | return ATOM_ENCODER_MODE_HDMI; | 713 | return ATOM_ENCODER_MODE_HDMI; |
713 | else if (radeon_connector->use_digital) | 714 | else if (radeon_connector->use_digital) |
714 | return ATOM_ENCODER_MODE_DVI; | 715 | return ATOM_ENCODER_MODE_DVI; |
@@ -718,8 +719,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
718 | case DRM_MODE_CONNECTOR_DVID: | 719 | case DRM_MODE_CONNECTOR_DVID: |
719 | case DRM_MODE_CONNECTOR_HDMIA: | 720 | case DRM_MODE_CONNECTOR_HDMIA: |
720 | default: | 721 | default: |
721 | if (drm_detect_hdmi_monitor(radeon_connector->edid) && | 722 | if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) || |
722 | radeon_audio) | 723 | (drm_detect_hdmi_monitor(radeon_connector->edid) && |
724 | (radeon_connector->audio == RADEON_AUDIO_AUTO))) | ||
723 | return ATOM_ENCODER_MODE_HDMI; | 725 | return ATOM_ENCODER_MODE_HDMI; |
724 | else | 726 | else |
725 | return ATOM_ENCODER_MODE_DVI; | 727 | return ATOM_ENCODER_MODE_DVI; |
@@ -732,8 +734,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
732 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || | 734 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || |
733 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) | 735 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) |
734 | return ATOM_ENCODER_MODE_DP; | 736 | return ATOM_ENCODER_MODE_DP; |
735 | else if (drm_detect_hdmi_monitor(radeon_connector->edid) && | 737 | else if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) || |
736 | radeon_audio) | 738 | (drm_detect_hdmi_monitor(radeon_connector->edid) && |
739 | (radeon_connector->audio == RADEON_AUDIO_AUTO))) | ||
737 | return ATOM_ENCODER_MODE_HDMI; | 740 | return ATOM_ENCODER_MODE_HDMI; |
738 | else | 741 | else |
739 | return ATOM_ENCODER_MODE_DVI; | 742 | return ATOM_ENCODER_MODE_DVI; |
@@ -1647,8 +1650,12 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) | |||
1647 | atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); | 1650 | atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); |
1648 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); | 1651 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); |
1649 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); | 1652 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); |
1650 | /* some early dce3.2 boards have a bug in their transmitter control table */ | 1653 | /* some dce3.x boards have a bug in their transmitter control table. |
1651 | if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730)) | 1654 | * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE |
1655 | * does the same thing and more. | ||
1656 | */ | ||
1657 | if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) && | ||
1658 | (rdev->family != CHIP_RS880)) | ||
1652 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); | 1659 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); |
1653 | } | 1660 | } |
1654 | if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { | 1661 | if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { |
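The three encoder-mode hunks above reduce to a small predicate: a connector whose audio property is set to "enable" is always driven as HDMI, and EDID-based HDMI detection only decides the outcome when the property is "auto". Below is a minimal stand-alone sketch of that decision; the enum and function names are illustrative, not the driver's.

/* Stand-alone model of the audio-property / EDID decision above.
 * Names are hypothetical; only the logic mirrors the hunks. */
#include <stdbool.h>
#include <stdio.h>

enum audio_prop { AUDIO_DISABLE, AUDIO_ENABLE, AUDIO_AUTO };

static bool pick_hdmi_mode(enum audio_prop prop, bool sink_is_hdmi)
{
	if (prop == AUDIO_ENABLE)
		return true;                            /* user forced audio on */
	return sink_is_hdmi && prop == AUDIO_AUTO;      /* otherwise follow EDID */
}

int main(void)
{
	printf("auto    + HDMI sink -> %s\n", pick_hdmi_mode(AUDIO_AUTO, true) ? "HDMI" : "DVI");
	printf("disable + HDMI sink -> %s\n", pick_hdmi_mode(AUDIO_DISABLE, true) ? "HDMI" : "DVI");
	printf("enable  + DVI sink  -> %s\n", pick_hdmi_mode(AUDIO_ENABLE, false) ? "HDMI" : "DVI");
	return 0;
}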
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 53b43dd3cf1e..252e10a41cf5 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -47,10 +47,11 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev, | |||
47 | u32 smc_start_address, | 47 | u32 smc_start_address, |
48 | const u8 *src, u32 byte_count, u32 limit) | 48 | const u8 *src, u32 byte_count, u32 limit) |
49 | { | 49 | { |
50 | unsigned long flags; | ||
50 | u32 data, original_data; | 51 | u32 data, original_data; |
51 | u32 addr; | 52 | u32 addr; |
52 | u32 extra_shift; | 53 | u32 extra_shift; |
53 | int ret; | 54 | int ret = 0; |
54 | 55 | ||
55 | if (smc_start_address & 3) | 56 | if (smc_start_address & 3) |
56 | return -EINVAL; | 57 | return -EINVAL; |
@@ -59,13 +60,14 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev, | |||
59 | 60 | ||
60 | addr = smc_start_address; | 61 | addr = smc_start_address; |
61 | 62 | ||
63 | spin_lock_irqsave(&rdev->smc_idx_lock, flags); | ||
62 | while (byte_count >= 4) { | 64 | while (byte_count >= 4) { |
63 | /* SMC address space is BE */ | 65 | /* SMC address space is BE */ |
64 | data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; | 66 | data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; |
65 | 67 | ||
66 | ret = ci_set_smc_sram_address(rdev, addr, limit); | 68 | ret = ci_set_smc_sram_address(rdev, addr, limit); |
67 | if (ret) | 69 | if (ret) |
68 | return ret; | 70 | goto done; |
69 | 71 | ||
70 | WREG32(SMC_IND_DATA_0, data); | 72 | WREG32(SMC_IND_DATA_0, data); |
71 | 73 | ||
@@ -80,7 +82,7 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev, | |||
80 | 82 | ||
81 | ret = ci_set_smc_sram_address(rdev, addr, limit); | 83 | ret = ci_set_smc_sram_address(rdev, addr, limit); |
82 | if (ret) | 84 | if (ret) |
83 | return ret; | 85 | goto done; |
84 | 86 | ||
85 | original_data = RREG32(SMC_IND_DATA_0); | 87 | original_data = RREG32(SMC_IND_DATA_0); |
86 | 88 | ||
@@ -97,11 +99,15 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev, | |||
97 | 99 | ||
98 | ret = ci_set_smc_sram_address(rdev, addr, limit); | 100 | ret = ci_set_smc_sram_address(rdev, addr, limit); |
99 | if (ret) | 101 | if (ret) |
100 | return ret; | 102 | goto done; |
101 | 103 | ||
102 | WREG32(SMC_IND_DATA_0, data); | 104 | WREG32(SMC_IND_DATA_0, data); |
103 | } | 105 | } |
104 | return 0; | 106 | |
107 | done: | ||
108 | spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); | ||
109 | |||
110 | return ret; | ||
105 | } | 111 | } |
106 | 112 | ||
107 | void ci_start_smc(struct radeon_device *rdev) | 113 | void ci_start_smc(struct radeon_device *rdev) |
@@ -197,6 +203,7 @@ PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev) | |||
197 | 203 | ||
198 | int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit) | 204 | int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit) |
199 | { | 205 | { |
206 | unsigned long flags; | ||
200 | u32 ucode_start_address; | 207 | u32 ucode_start_address; |
201 | u32 ucode_size; | 208 | u32 ucode_size; |
202 | const u8 *src; | 209 | const u8 *src; |
@@ -219,6 +226,7 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit) | |||
219 | return -EINVAL; | 226 | return -EINVAL; |
220 | 227 | ||
221 | src = (const u8 *)rdev->smc_fw->data; | 228 | src = (const u8 *)rdev->smc_fw->data; |
229 | spin_lock_irqsave(&rdev->smc_idx_lock, flags); | ||
222 | WREG32(SMC_IND_INDEX_0, ucode_start_address); | 230 | WREG32(SMC_IND_INDEX_0, ucode_start_address); |
223 | WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); | 231 | WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); |
224 | while (ucode_size >= 4) { | 232 | while (ucode_size >= 4) { |
@@ -231,6 +239,7 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit) | |||
231 | ucode_size -= 4; | 239 | ucode_size -= 4; |
232 | } | 240 | } |
233 | WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); | 241 | WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); |
242 | spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); | ||
234 | 243 | ||
235 | return 0; | 244 | return 0; |
236 | } | 245 | } |
@@ -238,25 +247,29 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit) | |||
238 | int ci_read_smc_sram_dword(struct radeon_device *rdev, | 247 | int ci_read_smc_sram_dword(struct radeon_device *rdev, |
239 | u32 smc_address, u32 *value, u32 limit) | 248 | u32 smc_address, u32 *value, u32 limit) |
240 | { | 249 | { |
250 | unsigned long flags; | ||
241 | int ret; | 251 | int ret; |
242 | 252 | ||
253 | spin_lock_irqsave(&rdev->smc_idx_lock, flags); | ||
243 | ret = ci_set_smc_sram_address(rdev, smc_address, limit); | 254 | ret = ci_set_smc_sram_address(rdev, smc_address, limit); |
244 | if (ret) | 255 | if (ret == 0) |
245 | return ret; | 256 | *value = RREG32(SMC_IND_DATA_0); |
257 | spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); | ||
246 | 258 | ||
247 | *value = RREG32(SMC_IND_DATA_0); | 259 | return ret; |
248 | return 0; | ||
249 | } | 260 | } |
250 | 261 | ||
251 | int ci_write_smc_sram_dword(struct radeon_device *rdev, | 262 | int ci_write_smc_sram_dword(struct radeon_device *rdev, |
252 | u32 smc_address, u32 value, u32 limit) | 263 | u32 smc_address, u32 value, u32 limit) |
253 | { | 264 | { |
265 | unsigned long flags; | ||
254 | int ret; | 266 | int ret; |
255 | 267 | ||
268 | spin_lock_irqsave(&rdev->smc_idx_lock, flags); | ||
256 | ret = ci_set_smc_sram_address(rdev, smc_address, limit); | 269 | ret = ci_set_smc_sram_address(rdev, smc_address, limit); |
257 | if (ret) | 270 | if (ret == 0) |
258 | return ret; | 271 | WREG32(SMC_IND_DATA_0, value); |
272 | spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); | ||
259 | 273 | ||
260 | WREG32(SMC_IND_DATA_0, value); | 274 | return ret; |
261 | return 0; | ||
262 | } | 275 | } |
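Each ci_smc.c hunk above wraps the SMC_IND_INDEX_0/SMC_IND_DATA_0 accesses in rdev->smc_idx_lock, because the index write and the following data access must not be separated by another writer reprogramming the index register. The following is a user-space model of that indexed-register pattern, with ordinary variables and a pthread mutex standing in for the hardware registers and the irqsave spinlock; it is an illustration, not driver code.

/* Model of an index/data register pair protected by one lock. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_index_reg;            /* stands in for SMC_IND_INDEX_0 */
static uint32_t fake_sram[16];             /* stands in for SMC address space */
static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;

static uint32_t smc_read(uint32_t addr)
{
	uint32_t v;

	pthread_mutex_lock(&idx_lock);     /* both steps must stay paired: */
	fake_index_reg = addr;             /*   1) program the index        */
	v = fake_sram[fake_index_reg];     /*   2) read the data port       */
	pthread_mutex_unlock(&idx_lock);
	return v;
}

int main(void)
{
	fake_sram[3] = 0xdeadbeef;
	printf("0x%08x\n", (unsigned)smc_read(3));
	return 0;
}

Without the lock, a concurrent caller could move fake_index_reg between the two steps and the read would return data for the wrong address, which is exactly the race the smc_idx_lock additions close.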
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index a3bba0587276..adbdb6503b05 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -77,6 +77,8 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev); | |||
77 | static void cik_program_aspm(struct radeon_device *rdev); | 77 | static void cik_program_aspm(struct radeon_device *rdev); |
78 | static void cik_init_pg(struct radeon_device *rdev); | 78 | static void cik_init_pg(struct radeon_device *rdev); |
79 | static void cik_init_cg(struct radeon_device *rdev); | 79 | static void cik_init_cg(struct radeon_device *rdev); |
80 | static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev, | ||
81 | bool enable); | ||
80 | 82 | ||
81 | /* get temperature in millidegrees */ | 83 | /* get temperature in millidegrees */ |
82 | int ci_get_temp(struct radeon_device *rdev) | 84 | int ci_get_temp(struct radeon_device *rdev) |
@@ -120,20 +122,27 @@ int kv_get_temp(struct radeon_device *rdev) | |||
120 | */ | 122 | */ |
121 | u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg) | 123 | u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg) |
122 | { | 124 | { |
125 | unsigned long flags; | ||
123 | u32 r; | 126 | u32 r; |
124 | 127 | ||
128 | spin_lock_irqsave(&rdev->pciep_idx_lock, flags); | ||
125 | WREG32(PCIE_INDEX, reg); | 129 | WREG32(PCIE_INDEX, reg); |
126 | (void)RREG32(PCIE_INDEX); | 130 | (void)RREG32(PCIE_INDEX); |
127 | r = RREG32(PCIE_DATA); | 131 | r = RREG32(PCIE_DATA); |
132 | spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags); | ||
128 | return r; | 133 | return r; |
129 | } | 134 | } |
130 | 135 | ||
131 | void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) | 136 | void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
132 | { | 137 | { |
138 | unsigned long flags; | ||
139 | |||
140 | spin_lock_irqsave(&rdev->pciep_idx_lock, flags); | ||
133 | WREG32(PCIE_INDEX, reg); | 141 | WREG32(PCIE_INDEX, reg); |
134 | (void)RREG32(PCIE_INDEX); | 142 | (void)RREG32(PCIE_INDEX); |
135 | WREG32(PCIE_DATA, v); | 143 | WREG32(PCIE_DATA, v); |
136 | (void)RREG32(PCIE_DATA); | 144 | (void)RREG32(PCIE_DATA); |
145 | spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags); | ||
137 | } | 146 | } |
138 | 147 | ||
139 | static const u32 spectre_rlc_save_restore_register_list[] = | 148 | static const u32 spectre_rlc_save_restore_register_list[] = |
@@ -2722,7 +2731,8 @@ static void cik_gpu_init(struct radeon_device *rdev) | |||
2722 | } else if ((rdev->pdev->device == 0x1309) || | 2731 | } else if ((rdev->pdev->device == 0x1309) || |
2723 | (rdev->pdev->device == 0x130A) || | 2732 | (rdev->pdev->device == 0x130A) || |
2724 | (rdev->pdev->device == 0x130D) || | 2733 | (rdev->pdev->device == 0x130D) || |
2725 | (rdev->pdev->device == 0x1313)) { | 2734 | (rdev->pdev->device == 0x1313) || |
2735 | (rdev->pdev->device == 0x131D)) { | ||
2726 | rdev->config.cik.max_cu_per_sh = 6; | 2736 | rdev->config.cik.max_cu_per_sh = 6; |
2727 | rdev->config.cik.max_backends_per_se = 2; | 2737 | rdev->config.cik.max_backends_per_se = 2; |
2728 | } else if ((rdev->pdev->device == 0x1306) || | 2738 | } else if ((rdev->pdev->device == 0x1306) || |
@@ -4013,6 +4023,8 @@ static int cik_cp_resume(struct radeon_device *rdev) | |||
4013 | { | 4023 | { |
4014 | int r; | 4024 | int r; |
4015 | 4025 | ||
4026 | cik_enable_gui_idle_interrupt(rdev, false); | ||
4027 | |||
4016 | r = cik_cp_load_microcode(rdev); | 4028 | r = cik_cp_load_microcode(rdev); |
4017 | if (r) | 4029 | if (r) |
4018 | return r; | 4030 | return r; |
@@ -4024,6 +4036,8 @@ static int cik_cp_resume(struct radeon_device *rdev) | |||
4024 | if (r) | 4036 | if (r) |
4025 | return r; | 4037 | return r; |
4026 | 4038 | ||
4039 | cik_enable_gui_idle_interrupt(rdev, true); | ||
4040 | |||
4027 | return 0; | 4041 | return 0; |
4028 | } | 4042 | } |
4029 | 4043 | ||
@@ -5376,7 +5390,9 @@ static void cik_enable_hdp_ls(struct radeon_device *rdev, | |||
5376 | void cik_update_cg(struct radeon_device *rdev, | 5390 | void cik_update_cg(struct radeon_device *rdev, |
5377 | u32 block, bool enable) | 5391 | u32 block, bool enable) |
5378 | { | 5392 | { |
5393 | |||
5379 | if (block & RADEON_CG_BLOCK_GFX) { | 5394 | if (block & RADEON_CG_BLOCK_GFX) { |
5395 | cik_enable_gui_idle_interrupt(rdev, false); | ||
5380 | /* order matters! */ | 5396 | /* order matters! */ |
5381 | if (enable) { | 5397 | if (enable) { |
5382 | cik_enable_mgcg(rdev, true); | 5398 | cik_enable_mgcg(rdev, true); |
@@ -5385,6 +5401,7 @@ void cik_update_cg(struct radeon_device *rdev, | |||
5385 | cik_enable_cgcg(rdev, false); | 5401 | cik_enable_cgcg(rdev, false); |
5386 | cik_enable_mgcg(rdev, false); | 5402 | cik_enable_mgcg(rdev, false); |
5387 | } | 5403 | } |
5404 | cik_enable_gui_idle_interrupt(rdev, true); | ||
5388 | } | 5405 | } |
5389 | 5406 | ||
5390 | if (block & RADEON_CG_BLOCK_MC) { | 5407 | if (block & RADEON_CG_BLOCK_MC) { |
@@ -5541,7 +5558,7 @@ static void cik_enable_gfx_cgpg(struct radeon_device *rdev, | |||
5541 | { | 5558 | { |
5542 | u32 data, orig; | 5559 | u32 data, orig; |
5543 | 5560 | ||
5544 | if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) { | 5561 | if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) { |
5545 | orig = data = RREG32(RLC_PG_CNTL); | 5562 | orig = data = RREG32(RLC_PG_CNTL); |
5546 | data |= GFX_PG_ENABLE; | 5563 | data |= GFX_PG_ENABLE; |
5547 | if (orig != data) | 5564 | if (orig != data) |
@@ -5805,7 +5822,7 @@ static void cik_init_pg(struct radeon_device *rdev) | |||
5805 | if (rdev->pg_flags) { | 5822 | if (rdev->pg_flags) { |
5806 | cik_enable_sck_slowdown_on_pu(rdev, true); | 5823 | cik_enable_sck_slowdown_on_pu(rdev, true); |
5807 | cik_enable_sck_slowdown_on_pd(rdev, true); | 5824 | cik_enable_sck_slowdown_on_pd(rdev, true); |
5808 | if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) { | 5825 | if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) { |
5809 | cik_init_gfx_cgpg(rdev); | 5826 | cik_init_gfx_cgpg(rdev); |
5810 | cik_enable_cp_pg(rdev, true); | 5827 | cik_enable_cp_pg(rdev, true); |
5811 | cik_enable_gds_pg(rdev, true); | 5828 | cik_enable_gds_pg(rdev, true); |
@@ -5819,7 +5836,7 @@ static void cik_fini_pg(struct radeon_device *rdev) | |||
5819 | { | 5836 | { |
5820 | if (rdev->pg_flags) { | 5837 | if (rdev->pg_flags) { |
5821 | cik_update_gfx_pg(rdev, false); | 5838 | cik_update_gfx_pg(rdev, false); |
5822 | if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) { | 5839 | if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) { |
5823 | cik_enable_cp_pg(rdev, false); | 5840 | cik_enable_cp_pg(rdev, false); |
5824 | cik_enable_gds_pg(rdev, false); | 5841 | cik_enable_gds_pg(rdev, false); |
5825 | } | 5842 | } |
@@ -5895,7 +5912,9 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev) | |||
5895 | u32 tmp; | 5912 | u32 tmp; |
5896 | 5913 | ||
5897 | /* gfx ring */ | 5914 | /* gfx ring */ |
5898 | WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); | 5915 | tmp = RREG32(CP_INT_CNTL_RING0) & |
5916 | (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); | ||
5917 | WREG32(CP_INT_CNTL_RING0, tmp); | ||
5899 | /* sdma */ | 5918 | /* sdma */ |
5900 | tmp = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; | 5919 | tmp = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; |
5901 | WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, tmp); | 5920 | WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, tmp); |
@@ -6036,8 +6055,7 @@ static int cik_irq_init(struct radeon_device *rdev) | |||
6036 | */ | 6055 | */ |
6037 | int cik_irq_set(struct radeon_device *rdev) | 6056 | int cik_irq_set(struct radeon_device *rdev) |
6038 | { | 6057 | { |
6039 | u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE | | 6058 | u32 cp_int_cntl; |
6040 | PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE; | ||
6041 | u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3; | 6059 | u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3; |
6042 | u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3; | 6060 | u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3; |
6043 | u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; | 6061 | u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; |
@@ -6058,6 +6076,10 @@ int cik_irq_set(struct radeon_device *rdev) | |||
6058 | return 0; | 6076 | return 0; |
6059 | } | 6077 | } |
6060 | 6078 | ||
6079 | cp_int_cntl = RREG32(CP_INT_CNTL_RING0) & | ||
6080 | (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); | ||
6081 | cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE; | ||
6082 | |||
6061 | hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; | 6083 | hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; |
6062 | hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; | 6084 | hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; |
6063 | hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; | 6085 | hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; |
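The CP_INT_CNTL_RING0 hunks above (in cik_disable_interrupt_state and cik_irq_set) stop writing a fixed enable value and instead read the register back, keep the CNTX_BUSY/CNTX_EMPTY enable bits exactly as the hardware currently has them, and clear everything else. A stand-alone read-modify-write illustration follows; the bit positions are made up for the example.

#include <stdint.h>
#include <stdio.h>

#define BUSY_INT_ENABLE   (1u << 19)   /* illustrative bit positions only */
#define EMPTY_INT_ENABLE  (1u << 20)
#define OTHER_INT_ENABLE  (1u << 25)

int main(void)
{
	uint32_t reg = BUSY_INT_ENABLE | OTHER_INT_ENABLE;   /* pretend current state */

	/* keep the two context-interrupt bits as-is, drop the rest */
	uint32_t next = reg & (BUSY_INT_ENABLE | EMPTY_INT_ENABLE);

	printf("before 0x%08x after 0x%08x\n", (unsigned)reg, (unsigned)next);
	return 0;
}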
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 8953255e894b..85a69d2ea3d2 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -28,22 +28,30 @@ | |||
28 | static u32 dce6_endpoint_rreg(struct radeon_device *rdev, | 28 | static u32 dce6_endpoint_rreg(struct radeon_device *rdev, |
29 | u32 block_offset, u32 reg) | 29 | u32 block_offset, u32 reg) |
30 | { | 30 | { |
31 | unsigned long flags; | ||
31 | u32 r; | 32 | u32 r; |
32 | 33 | ||
34 | spin_lock_irqsave(&rdev->end_idx_lock, flags); | ||
33 | WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); | 35 | WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); |
34 | r = RREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset); | 36 | r = RREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset); |
37 | spin_unlock_irqrestore(&rdev->end_idx_lock, flags); | ||
38 | |||
35 | return r; | 39 | return r; |
36 | } | 40 | } |
37 | 41 | ||
38 | static void dce6_endpoint_wreg(struct radeon_device *rdev, | 42 | static void dce6_endpoint_wreg(struct radeon_device *rdev, |
39 | u32 block_offset, u32 reg, u32 v) | 43 | u32 block_offset, u32 reg, u32 v) |
40 | { | 44 | { |
45 | unsigned long flags; | ||
46 | |||
47 | spin_lock_irqsave(&rdev->end_idx_lock, flags); | ||
41 | if (ASIC_IS_DCE8(rdev)) | 48 | if (ASIC_IS_DCE8(rdev)) |
42 | WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); | 49 | WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); |
43 | else | 50 | else |
44 | WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, | 51 | WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, |
45 | AZ_ENDPOINT_REG_WRITE_EN | AZ_ENDPOINT_REG_INDEX(reg)); | 52 | AZ_ENDPOINT_REG_WRITE_EN | AZ_ENDPOINT_REG_INDEX(reg)); |
46 | WREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset, v); | 53 | WREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset, v); |
54 | spin_unlock_irqrestore(&rdev->end_idx_lock, flags); | ||
47 | } | 55 | } |
48 | 56 | ||
49 | #define RREG32_ENDPOINT(block, reg) dce6_endpoint_rreg(rdev, (block), (reg)) | 57 | #define RREG32_ENDPOINT(block, reg) dce6_endpoint_rreg(rdev, (block), (reg)) |
@@ -86,12 +94,12 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder) | |||
86 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 94 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
87 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 95 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
88 | u32 offset = dig->afmt->offset; | 96 | u32 offset = dig->afmt->offset; |
89 | u32 id = dig->afmt->pin->id; | ||
90 | 97 | ||
91 | if (!dig->afmt->pin) | 98 | if (!dig->afmt->pin) |
92 | return; | 99 | return; |
93 | 100 | ||
94 | WREG32(AFMT_AUDIO_SRC_CONTROL + offset, AFMT_AUDIO_SRC_SELECT(id)); | 101 | WREG32(AFMT_AUDIO_SRC_CONTROL + offset, |
102 | AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id)); | ||
95 | } | 103 | } |
96 | 104 | ||
97 | void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder) | 105 | void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder) |
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index ecd60809db4e..b98b9c97b732 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -40,6 +40,7 @@ static int kv_calculate_dpm_settings(struct radeon_device *rdev); | |||
40 | static void kv_enable_new_levels(struct radeon_device *rdev); | 40 | static void kv_enable_new_levels(struct radeon_device *rdev); |
41 | static void kv_program_nbps_index_settings(struct radeon_device *rdev, | 41 | static void kv_program_nbps_index_settings(struct radeon_device *rdev, |
42 | struct radeon_ps *new_rps); | 42 | struct radeon_ps *new_rps); |
43 | static int kv_set_enabled_level(struct radeon_device *rdev, u32 level); | ||
43 | static int kv_set_enabled_levels(struct radeon_device *rdev); | 44 | static int kv_set_enabled_levels(struct radeon_device *rdev); |
44 | static int kv_force_dpm_highest(struct radeon_device *rdev); | 45 | static int kv_force_dpm_highest(struct radeon_device *rdev); |
45 | static int kv_force_dpm_lowest(struct radeon_device *rdev); | 46 | static int kv_force_dpm_lowest(struct radeon_device *rdev); |
@@ -519,7 +520,7 @@ static int kv_set_dpm_boot_state(struct radeon_device *rdev) | |||
519 | 520 | ||
520 | static void kv_program_vc(struct radeon_device *rdev) | 521 | static void kv_program_vc(struct radeon_device *rdev) |
521 | { | 522 | { |
522 | WREG32_SMC(CG_FTV_0, 0x3FFFC000); | 523 | WREG32_SMC(CG_FTV_0, 0x3FFFC100); |
523 | } | 524 | } |
524 | 525 | ||
525 | static void kv_clear_vc(struct radeon_device *rdev) | 526 | static void kv_clear_vc(struct radeon_device *rdev) |
@@ -638,7 +639,10 @@ static int kv_force_lowest_valid(struct radeon_device *rdev) | |||
638 | 639 | ||
639 | static int kv_unforce_levels(struct radeon_device *rdev) | 640 | static int kv_unforce_levels(struct radeon_device *rdev) |
640 | { | 641 | { |
641 | return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel); | 642 | if (rdev->family == CHIP_KABINI) |
643 | return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel); | ||
644 | else | ||
645 | return kv_set_enabled_levels(rdev); | ||
642 | } | 646 | } |
643 | 647 | ||
644 | static int kv_update_sclk_t(struct radeon_device *rdev) | 648 | static int kv_update_sclk_t(struct radeon_device *rdev) |
@@ -667,9 +671,8 @@ static int kv_program_bootup_state(struct radeon_device *rdev) | |||
667 | &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; | 671 | &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; |
668 | 672 | ||
669 | if (table && table->count) { | 673 | if (table && table->count) { |
670 | for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) { | 674 | for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { |
671 | if ((table->entries[i].clk == pi->boot_pl.sclk) || | 675 | if (table->entries[i].clk == pi->boot_pl.sclk) |
672 | (i == 0)) | ||
673 | break; | 676 | break; |
674 | } | 677 | } |
675 | 678 | ||
@@ -682,9 +685,8 @@ static int kv_program_bootup_state(struct radeon_device *rdev) | |||
682 | if (table->num_max_dpm_entries == 0) | 685 | if (table->num_max_dpm_entries == 0) |
683 | return -EINVAL; | 686 | return -EINVAL; |
684 | 687 | ||
685 | for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) { | 688 | for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { |
686 | if ((table->entries[i].sclk_frequency == pi->boot_pl.sclk) || | 689 | if (table->entries[i].sclk_frequency == pi->boot_pl.sclk) |
687 | (i == 0)) | ||
688 | break; | 690 | break; |
689 | } | 691 | } |
690 | 692 | ||
@@ -1078,6 +1080,13 @@ static int kv_enable_ulv(struct radeon_device *rdev, bool enable) | |||
1078 | PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV); | 1080 | PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV); |
1079 | } | 1081 | } |
1080 | 1082 | ||
1083 | static void kv_reset_acp_boot_level(struct radeon_device *rdev) | ||
1084 | { | ||
1085 | struct kv_power_info *pi = kv_get_pi(rdev); | ||
1086 | |||
1087 | pi->acp_boot_level = 0xff; | ||
1088 | } | ||
1089 | |||
1081 | static void kv_update_current_ps(struct radeon_device *rdev, | 1090 | static void kv_update_current_ps(struct radeon_device *rdev, |
1082 | struct radeon_ps *rps) | 1091 | struct radeon_ps *rps) |
1083 | { | 1092 | { |
@@ -1100,6 +1109,18 @@ static void kv_update_requested_ps(struct radeon_device *rdev, | |||
1100 | pi->requested_rps.ps_priv = &pi->requested_ps; | 1109 | pi->requested_rps.ps_priv = &pi->requested_ps; |
1101 | } | 1110 | } |
1102 | 1111 | ||
1112 | void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable) | ||
1113 | { | ||
1114 | struct kv_power_info *pi = kv_get_pi(rdev); | ||
1115 | int ret; | ||
1116 | |||
1117 | if (pi->bapm_enable) { | ||
1118 | ret = kv_smc_bapm_enable(rdev, enable); | ||
1119 | if (ret) | ||
1120 | DRM_ERROR("kv_smc_bapm_enable failed\n"); | ||
1121 | } | ||
1122 | } | ||
1123 | |||
1103 | int kv_dpm_enable(struct radeon_device *rdev) | 1124 | int kv_dpm_enable(struct radeon_device *rdev) |
1104 | { | 1125 | { |
1105 | struct kv_power_info *pi = kv_get_pi(rdev); | 1126 | struct kv_power_info *pi = kv_get_pi(rdev); |
@@ -1192,6 +1213,8 @@ int kv_dpm_enable(struct radeon_device *rdev) | |||
1192 | return ret; | 1213 | return ret; |
1193 | } | 1214 | } |
1194 | 1215 | ||
1216 | kv_reset_acp_boot_level(rdev); | ||
1217 | |||
1195 | if (rdev->irq.installed && | 1218 | if (rdev->irq.installed && |
1196 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | 1219 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { |
1197 | ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); | 1220 | ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); |
@@ -1203,6 +1226,12 @@ int kv_dpm_enable(struct radeon_device *rdev) | |||
1203 | radeon_irq_set(rdev); | 1226 | radeon_irq_set(rdev); |
1204 | } | 1227 | } |
1205 | 1228 | ||
1229 | ret = kv_smc_bapm_enable(rdev, false); | ||
1230 | if (ret) { | ||
1231 | DRM_ERROR("kv_smc_bapm_enable failed\n"); | ||
1232 | return ret; | ||
1233 | } | ||
1234 | |||
1206 | /* powerdown unused blocks for now */ | 1235 | /* powerdown unused blocks for now */ |
1207 | kv_dpm_powergate_acp(rdev, true); | 1236 | kv_dpm_powergate_acp(rdev, true); |
1208 | kv_dpm_powergate_samu(rdev, true); | 1237 | kv_dpm_powergate_samu(rdev, true); |
@@ -1226,6 +1255,8 @@ void kv_dpm_disable(struct radeon_device *rdev) | |||
1226 | RADEON_CG_BLOCK_BIF | | 1255 | RADEON_CG_BLOCK_BIF | |
1227 | RADEON_CG_BLOCK_HDP), false); | 1256 | RADEON_CG_BLOCK_HDP), false); |
1228 | 1257 | ||
1258 | kv_smc_bapm_enable(rdev, false); | ||
1259 | |||
1229 | /* powerup blocks */ | 1260 | /* powerup blocks */ |
1230 | kv_dpm_powergate_acp(rdev, false); | 1261 | kv_dpm_powergate_acp(rdev, false); |
1231 | kv_dpm_powergate_samu(rdev, false); | 1262 | kv_dpm_powergate_samu(rdev, false); |
@@ -1450,6 +1481,39 @@ static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate) | |||
1450 | return kv_enable_samu_dpm(rdev, !gate); | 1481 | return kv_enable_samu_dpm(rdev, !gate); |
1451 | } | 1482 | } |
1452 | 1483 | ||
1484 | static u8 kv_get_acp_boot_level(struct radeon_device *rdev) | ||
1485 | { | ||
1486 | u8 i; | ||
1487 | struct radeon_clock_voltage_dependency_table *table = | ||
1488 | &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; | ||
1489 | |||
1490 | for (i = 0; i < table->count; i++) { | ||
1491 | if (table->entries[i].clk >= 0) /* XXX */ | ||
1492 | break; | ||
1493 | } | ||
1494 | |||
1495 | if (i >= table->count) | ||
1496 | i = table->count - 1; | ||
1497 | |||
1498 | return i; | ||
1499 | } | ||
1500 | |||
1501 | static void kv_update_acp_boot_level(struct radeon_device *rdev) | ||
1502 | { | ||
1503 | struct kv_power_info *pi = kv_get_pi(rdev); | ||
1504 | u8 acp_boot_level; | ||
1505 | |||
1506 | if (!pi->caps_stable_p_state) { | ||
1507 | acp_boot_level = kv_get_acp_boot_level(rdev); | ||
1508 | if (acp_boot_level != pi->acp_boot_level) { | ||
1509 | pi->acp_boot_level = acp_boot_level; | ||
1510 | kv_send_msg_to_smc_with_parameter(rdev, | ||
1511 | PPSMC_MSG_ACPDPM_SetEnabledMask, | ||
1512 | (1 << pi->acp_boot_level)); | ||
1513 | } | ||
1514 | } | ||
1515 | } | ||
1516 | |||
1453 | static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate) | 1517 | static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate) |
1454 | { | 1518 | { |
1455 | struct kv_power_info *pi = kv_get_pi(rdev); | 1519 | struct kv_power_info *pi = kv_get_pi(rdev); |
@@ -1461,7 +1525,7 @@ static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate) | |||
1461 | if (pi->caps_stable_p_state) | 1525 | if (pi->caps_stable_p_state) |
1462 | pi->acp_boot_level = table->count - 1; | 1526 | pi->acp_boot_level = table->count - 1; |
1463 | else | 1527 | else |
1464 | pi->acp_boot_level = 0; | 1528 | pi->acp_boot_level = kv_get_acp_boot_level(rdev); |
1465 | 1529 | ||
1466 | ret = kv_copy_bytes_to_smc(rdev, | 1530 | ret = kv_copy_bytes_to_smc(rdev, |
1467 | pi->dpm_table_start + | 1531 | pi->dpm_table_start + |
@@ -1588,13 +1652,11 @@ static void kv_set_valid_clock_range(struct radeon_device *rdev, | |||
1588 | } | 1652 | } |
1589 | } | 1653 | } |
1590 | 1654 | ||
1591 | for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) { | 1655 | for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { |
1592 | if ((table->entries[i].clk <= new_ps->levels[new_ps->num_levels -1].sclk) || | 1656 | if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk) |
1593 | (i == 0)) { | ||
1594 | pi->highest_valid = i; | ||
1595 | break; | 1657 | break; |
1596 | } | ||
1597 | } | 1658 | } |
1659 | pi->highest_valid = i; | ||
1598 | 1660 | ||
1599 | if (pi->lowest_valid > pi->highest_valid) { | 1661 | if (pi->lowest_valid > pi->highest_valid) { |
1600 | if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) > | 1662 | if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) > |
@@ -1615,14 +1677,12 @@ static void kv_set_valid_clock_range(struct radeon_device *rdev, | |||
1615 | } | 1677 | } |
1616 | } | 1678 | } |
1617 | 1679 | ||
1618 | for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) { | 1680 | for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { |
1619 | if (table->entries[i].sclk_frequency <= | 1681 | if (table->entries[i].sclk_frequency <= |
1620 | new_ps->levels[new_ps->num_levels - 1].sclk || | 1682 | new_ps->levels[new_ps->num_levels - 1].sclk) |
1621 | i == 0) { | ||
1622 | pi->highest_valid = i; | ||
1623 | break; | 1683 | break; |
1624 | } | ||
1625 | } | 1684 | } |
1685 | pi->highest_valid = i; | ||
1626 | 1686 | ||
1627 | if (pi->lowest_valid > pi->highest_valid) { | 1687 | if (pi->lowest_valid > pi->highest_valid) { |
1628 | if ((new_ps->levels[0].sclk - | 1688 | if ((new_ps->levels[0].sclk - |
@@ -1724,6 +1784,14 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) | |||
1724 | RADEON_CG_BLOCK_BIF | | 1784 | RADEON_CG_BLOCK_BIF | |
1725 | RADEON_CG_BLOCK_HDP), false); | 1785 | RADEON_CG_BLOCK_HDP), false); |
1726 | 1786 | ||
1787 | if (pi->bapm_enable) { | ||
1788 | ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power); | ||
1789 | if (ret) { | ||
1790 | DRM_ERROR("kv_smc_bapm_enable failed\n"); | ||
1791 | return ret; | ||
1792 | } | ||
1793 | } | ||
1794 | |||
1727 | if (rdev->family == CHIP_KABINI) { | 1795 | if (rdev->family == CHIP_KABINI) { |
1728 | if (pi->enable_dpm) { | 1796 | if (pi->enable_dpm) { |
1729 | kv_set_valid_clock_range(rdev, new_ps); | 1797 | kv_set_valid_clock_range(rdev, new_ps); |
@@ -1775,6 +1843,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) | |||
1775 | return ret; | 1843 | return ret; |
1776 | } | 1844 | } |
1777 | #endif | 1845 | #endif |
1846 | kv_update_acp_boot_level(rdev); | ||
1778 | kv_update_sclk_t(rdev); | 1847 | kv_update_sclk_t(rdev); |
1779 | kv_enable_nb_dpm(rdev); | 1848 | kv_enable_nb_dpm(rdev); |
1780 | } | 1849 | } |
@@ -1806,12 +1875,23 @@ void kv_dpm_setup_asic(struct radeon_device *rdev) | |||
1806 | 1875 | ||
1807 | void kv_dpm_reset_asic(struct radeon_device *rdev) | 1876 | void kv_dpm_reset_asic(struct radeon_device *rdev) |
1808 | { | 1877 | { |
1809 | kv_force_lowest_valid(rdev); | 1878 | struct kv_power_info *pi = kv_get_pi(rdev); |
1810 | kv_init_graphics_levels(rdev); | 1879 | |
1811 | kv_program_bootup_state(rdev); | 1880 | if (rdev->family == CHIP_KABINI) { |
1812 | kv_upload_dpm_settings(rdev); | 1881 | kv_force_lowest_valid(rdev); |
1813 | kv_force_lowest_valid(rdev); | 1882 | kv_init_graphics_levels(rdev); |
1814 | kv_unforce_levels(rdev); | 1883 | kv_program_bootup_state(rdev); |
1884 | kv_upload_dpm_settings(rdev); | ||
1885 | kv_force_lowest_valid(rdev); | ||
1886 | kv_unforce_levels(rdev); | ||
1887 | } else { | ||
1888 | kv_init_graphics_levels(rdev); | ||
1889 | kv_program_bootup_state(rdev); | ||
1890 | kv_freeze_sclk_dpm(rdev, true); | ||
1891 | kv_upload_dpm_settings(rdev); | ||
1892 | kv_freeze_sclk_dpm(rdev, false); | ||
1893 | kv_set_enabled_level(rdev, pi->graphics_boot_level); | ||
1894 | } | ||
1815 | } | 1895 | } |
1816 | 1896 | ||
1817 | //XXX use sumo_dpm_display_configuration_changed | 1897 | //XXX use sumo_dpm_display_configuration_changed |
@@ -1871,12 +1951,15 @@ static int kv_force_dpm_highest(struct radeon_device *rdev) | |||
1871 | if (ret) | 1951 | if (ret) |
1872 | return ret; | 1952 | return ret; |
1873 | 1953 | ||
1874 | for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i >= 0; i--) { | 1954 | for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) { |
1875 | if (enable_mask & (1 << i)) | 1955 | if (enable_mask & (1 << i)) |
1876 | break; | 1956 | break; |
1877 | } | 1957 | } |
1878 | 1958 | ||
1879 | return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); | 1959 | if (rdev->family == CHIP_KABINI) |
1960 | return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); | ||
1961 | else | ||
1962 | return kv_set_enabled_level(rdev, i); | ||
1880 | } | 1963 | } |
1881 | 1964 | ||
1882 | static int kv_force_dpm_lowest(struct radeon_device *rdev) | 1965 | static int kv_force_dpm_lowest(struct radeon_device *rdev) |
@@ -1893,7 +1976,10 @@ static int kv_force_dpm_lowest(struct radeon_device *rdev) | |||
1893 | break; | 1976 | break; |
1894 | } | 1977 | } |
1895 | 1978 | ||
1896 | return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); | 1979 | if (rdev->family == CHIP_KABINI) |
1980 | return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); | ||
1981 | else | ||
1982 | return kv_set_enabled_level(rdev, i); | ||
1897 | } | 1983 | } |
1898 | 1984 | ||
1899 | static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev, | 1985 | static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev, |
@@ -1911,9 +1997,9 @@ static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev, | |||
1911 | if (!pi->caps_sclk_ds) | 1997 | if (!pi->caps_sclk_ds) |
1912 | return 0; | 1998 | return 0; |
1913 | 1999 | ||
1914 | for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i <= 0; i--) { | 2000 | for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) { |
1915 | temp = sclk / sumo_get_sleep_divider_from_id(i); | 2001 | temp = sclk / sumo_get_sleep_divider_from_id(i); |
1916 | if ((temp >= min) || (i == 0)) | 2002 | if (temp >= min) |
1917 | break; | 2003 | break; |
1918 | } | 2004 | } |
1919 | 2005 | ||
@@ -2039,12 +2125,12 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev, | |||
2039 | ps->dpmx_nb_ps_lo = 0x1; | 2125 | ps->dpmx_nb_ps_lo = 0x1; |
2040 | ps->dpmx_nb_ps_hi = 0x0; | 2126 | ps->dpmx_nb_ps_hi = 0x0; |
2041 | } else { | 2127 | } else { |
2042 | ps->dpm0_pg_nb_ps_lo = 0x1; | 2128 | ps->dpm0_pg_nb_ps_lo = 0x3; |
2043 | ps->dpm0_pg_nb_ps_hi = 0x0; | 2129 | ps->dpm0_pg_nb_ps_hi = 0x0; |
2044 | ps->dpmx_nb_ps_lo = 0x2; | 2130 | ps->dpmx_nb_ps_lo = 0x3; |
2045 | ps->dpmx_nb_ps_hi = 0x1; | 2131 | ps->dpmx_nb_ps_hi = 0x0; |
2046 | 2132 | ||
2047 | if (pi->sys_info.nb_dpm_enable && pi->battery_state) { | 2133 | if (pi->sys_info.nb_dpm_enable) { |
2048 | force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) || | 2134 | force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) || |
2049 | pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) || | 2135 | pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) || |
2050 | pi->disable_nb_ps3_in_battery; | 2136 | pi->disable_nb_ps3_in_battery; |
@@ -2210,6 +2296,15 @@ static void kv_enable_new_levels(struct radeon_device *rdev) | |||
2210 | } | 2296 | } |
2211 | } | 2297 | } |
2212 | 2298 | ||
2299 | static int kv_set_enabled_level(struct radeon_device *rdev, u32 level) | ||
2300 | { | ||
2301 | u32 new_mask = (1 << level); | ||
2302 | |||
2303 | return kv_send_msg_to_smc_with_parameter(rdev, | ||
2304 | PPSMC_MSG_SCLKDPM_SetEnabledMask, | ||
2305 | new_mask); | ||
2306 | } | ||
2307 | |||
2213 | static int kv_set_enabled_levels(struct radeon_device *rdev) | 2308 | static int kv_set_enabled_levels(struct radeon_device *rdev) |
2214 | { | 2309 | { |
2215 | struct kv_power_info *pi = kv_get_pi(rdev); | 2310 | struct kv_power_info *pi = kv_get_pi(rdev); |
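Several kv_dpm.c hunks above rewrite descending search loops from "run down to 0 and break when a match is found or i == 0" to "run down to 1 and simply fall out at 0"; either way i ends up at the highest matching index, or at 0 when nothing matches. A small stand-alone check of that equivalence follows; the match() predicate is invented purely for the test.

#include <stdbool.h>
#include <stdio.h>

static bool match(int i) { return i == 3; }     /* arbitrary test predicate */

static int old_style(int count)
{
	int i;

	for (i = count - 1; i >= 0; i--)
		if (match(i) || i == 0)
			break;
	return i;
}

static int new_style(int count)
{
	int i;

	for (i = count - 1; i > 0; i--)
		if (match(i))
			break;
	return i;
}

int main(void)
{
	printf("old=%d new=%d\n", old_style(8), new_style(8));   /* both report 3 */
	printf("old=%d new=%d\n", old_style(2), new_style(2));   /* both report 0 */
	return 0;
}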
diff --git a/drivers/gpu/drm/radeon/kv_dpm.h b/drivers/gpu/drm/radeon/kv_dpm.h
index 32bb079572d7..8cef7525d7a8 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.h
+++ b/drivers/gpu/drm/radeon/kv_dpm.h
@@ -192,6 +192,7 @@ int kv_send_msg_to_smc_with_parameter(struct radeon_device *rdev, | |||
192 | int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, | 192 | int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, |
193 | u32 *value, u32 limit); | 193 | u32 *value, u32 limit); |
194 | int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable); | 194 | int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable); |
195 | int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable); | ||
195 | int kv_copy_bytes_to_smc(struct radeon_device *rdev, | 196 | int kv_copy_bytes_to_smc(struct radeon_device *rdev, |
196 | u32 smc_start_address, | 197 | u32 smc_start_address, |
197 | const u8 *src, u32 byte_count, u32 limit); | 198 | const u8 *src, u32 byte_count, u32 limit); |
diff --git a/drivers/gpu/drm/radeon/kv_smc.c b/drivers/gpu/drm/radeon/kv_smc.c
index 34a226d7e34a..0000b59a6d05 100644
--- a/drivers/gpu/drm/radeon/kv_smc.c
+++ b/drivers/gpu/drm/radeon/kv_smc.c
@@ -107,6 +107,14 @@ int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable) | |||
107 | return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Disable); | 107 | return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Disable); |
108 | } | 108 | } |
109 | 109 | ||
110 | int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable) | ||
111 | { | ||
112 | if (enable) | ||
113 | return kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM); | ||
114 | else | ||
115 | return kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM); | ||
116 | } | ||
117 | |||
110 | int kv_copy_bytes_to_smc(struct radeon_device *rdev, | 118 | int kv_copy_bytes_to_smc(struct radeon_device *rdev, |
111 | u32 smc_start_address, | 119 | u32 smc_start_address, |
112 | const u8 *src, u32 byte_count, u32 limit) | 120 | const u8 *src, u32 byte_count, u32 limit) |
diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h
index 682842804bce..5670b8291285 100644
--- a/drivers/gpu/drm/radeon/ppsmc.h
+++ b/drivers/gpu/drm/radeon/ppsmc.h
@@ -163,6 +163,8 @@ typedef uint8_t PPSMC_Result; | |||
163 | #define PPSMC_MSG_VCEPowerON ((uint32_t) 0x10f) | 163 | #define PPSMC_MSG_VCEPowerON ((uint32_t) 0x10f) |
164 | #define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d) | 164 | #define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d) |
165 | #define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e) | 165 | #define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e) |
166 | #define PPSMC_MSG_EnableBAPM ((uint32_t) 0x120) | ||
167 | #define PPSMC_MSG_DisableBAPM ((uint32_t) 0x121) | ||
166 | #define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124) | 168 | #define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124) |
167 | 169 | ||
168 | 170 | ||
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 9fc61dd68bc0..24175717307b 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2853,21 +2853,28 @@ static void r100_pll_errata_after_data(struct radeon_device *rdev) | |||
2853 | 2853 | ||
2854 | uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg) | 2854 | uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg) |
2855 | { | 2855 | { |
2856 | unsigned long flags; | ||
2856 | uint32_t data; | 2857 | uint32_t data; |
2857 | 2858 | ||
2859 | spin_lock_irqsave(&rdev->pll_idx_lock, flags); | ||
2858 | WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f); | 2860 | WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f); |
2859 | r100_pll_errata_after_index(rdev); | 2861 | r100_pll_errata_after_index(rdev); |
2860 | data = RREG32(RADEON_CLOCK_CNTL_DATA); | 2862 | data = RREG32(RADEON_CLOCK_CNTL_DATA); |
2861 | r100_pll_errata_after_data(rdev); | 2863 | r100_pll_errata_after_data(rdev); |
2864 | spin_unlock_irqrestore(&rdev->pll_idx_lock, flags); | ||
2862 | return data; | 2865 | return data; |
2863 | } | 2866 | } |
2864 | 2867 | ||
2865 | void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | 2868 | void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
2866 | { | 2869 | { |
2870 | unsigned long flags; | ||
2871 | |||
2872 | spin_lock_irqsave(&rdev->pll_idx_lock, flags); | ||
2867 | WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN)); | 2873 | WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN)); |
2868 | r100_pll_errata_after_index(rdev); | 2874 | r100_pll_errata_after_index(rdev); |
2869 | WREG32(RADEON_CLOCK_CNTL_DATA, v); | 2875 | WREG32(RADEON_CLOCK_CNTL_DATA, v); |
2870 | r100_pll_errata_after_data(rdev); | 2876 | r100_pll_errata_after_data(rdev); |
2877 | spin_unlock_irqrestore(&rdev->pll_idx_lock, flags); | ||
2871 | } | 2878 | } |
2872 | 2879 | ||
2873 | static void r100_set_safe_registers(struct radeon_device *rdev) | 2880 | static void r100_set_safe_registers(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 4e796ecf9ea4..6edf2b3a52b4 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -160,18 +160,25 @@ void r420_pipes_init(struct radeon_device *rdev) | |||
160 | 160 | ||
161 | u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg) | 161 | u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg) |
162 | { | 162 | { |
163 | unsigned long flags; | ||
163 | u32 r; | 164 | u32 r; |
164 | 165 | ||
166 | spin_lock_irqsave(&rdev->mc_idx_lock, flags); | ||
165 | WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg)); | 167 | WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg)); |
166 | r = RREG32(R_0001FC_MC_IND_DATA); | 168 | r = RREG32(R_0001FC_MC_IND_DATA); |
169 | spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); | ||
167 | return r; | 170 | return r; |
168 | } | 171 | } |
169 | 172 | ||
170 | void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v) | 173 | void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
171 | { | 174 | { |
175 | unsigned long flags; | ||
176 | |||
177 | spin_lock_irqsave(&rdev->mc_idx_lock, flags); | ||
172 | WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) | | 178 | WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) | |
173 | S_0001F8_MC_IND_WR_EN(1)); | 179 | S_0001F8_MC_IND_WR_EN(1)); |
174 | WREG32(R_0001FC_MC_IND_DATA, v); | 180 | WREG32(R_0001FC_MC_IND_DATA, v); |
181 | spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); | ||
175 | } | 182 | } |
176 | 183 | ||
177 | static void r420_debugfs(struct radeon_device *rdev) | 184 | static void r420_debugfs(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index ea4d3734e6d9..2a1b1876b431 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -119,6 +119,11 @@ u32 r600_get_xclk(struct radeon_device *rdev) | |||
119 | return rdev->clock.spll.reference_freq; | 119 | return rdev->clock.spll.reference_freq; |
120 | } | 120 | } |
121 | 121 | ||
122 | int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) | ||
123 | { | ||
124 | return 0; | ||
125 | } | ||
126 | |||
122 | /* get temperature in millidegrees */ | 127 | /* get temperature in millidegrees */ |
123 | int rv6xx_get_temp(struct radeon_device *rdev) | 128 | int rv6xx_get_temp(struct radeon_device *rdev) |
124 | { | 129 | { |
@@ -1045,20 +1050,27 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev) | |||
1045 | 1050 | ||
1046 | uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg) | 1051 | uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg) |
1047 | { | 1052 | { |
1053 | unsigned long flags; | ||
1048 | uint32_t r; | 1054 | uint32_t r; |
1049 | 1055 | ||
1056 | spin_lock_irqsave(&rdev->mc_idx_lock, flags); | ||
1050 | WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg)); | 1057 | WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg)); |
1051 | r = RREG32(R_0028FC_MC_DATA); | 1058 | r = RREG32(R_0028FC_MC_DATA); |
1052 | WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR); | 1059 | WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR); |
1060 | spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); | ||
1053 | return r; | 1061 | return r; |
1054 | } | 1062 | } |
1055 | 1063 | ||
1056 | void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | 1064 | void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
1057 | { | 1065 | { |
1066 | unsigned long flags; | ||
1067 | |||
1068 | spin_lock_irqsave(&rdev->mc_idx_lock, flags); | ||
1058 | WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) | | 1069 | WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) | |
1059 | S_0028F8_MC_IND_WR_EN(1)); | 1070 | S_0028F8_MC_IND_WR_EN(1)); |
1060 | WREG32(R_0028FC_MC_DATA, v); | 1071 | WREG32(R_0028FC_MC_DATA, v); |
1061 | WREG32(R_0028F8_MC_INDEX, 0x7F); | 1072 | WREG32(R_0028F8_MC_INDEX, 0x7F); |
1073 | spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); | ||
1062 | } | 1074 | } |
1063 | 1075 | ||
1064 | static void r600_mc_program(struct radeon_device *rdev) | 1076 | static void r600_mc_program(struct radeon_device *rdev) |
@@ -2092,20 +2104,27 @@ static void r600_gpu_init(struct radeon_device *rdev) | |||
2092 | */ | 2104 | */ |
2093 | u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg) | 2105 | u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg) |
2094 | { | 2106 | { |
2107 | unsigned long flags; | ||
2095 | u32 r; | 2108 | u32 r; |
2096 | 2109 | ||
2110 | spin_lock_irqsave(&rdev->pciep_idx_lock, flags); | ||
2097 | WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); | 2111 | WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); |
2098 | (void)RREG32(PCIE_PORT_INDEX); | 2112 | (void)RREG32(PCIE_PORT_INDEX); |
2099 | r = RREG32(PCIE_PORT_DATA); | 2113 | r = RREG32(PCIE_PORT_DATA); |
2114 | spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags); | ||
2100 | return r; | 2115 | return r; |
2101 | } | 2116 | } |
2102 | 2117 | ||
2103 | void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) | 2118 | void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
2104 | { | 2119 | { |
2120 | unsigned long flags; | ||
2121 | |||
2122 | spin_lock_irqsave(&rdev->pciep_idx_lock, flags); | ||
2105 | WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); | 2123 | WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); |
2106 | (void)RREG32(PCIE_PORT_INDEX); | 2124 | (void)RREG32(PCIE_PORT_INDEX); |
2107 | WREG32(PCIE_PORT_DATA, (v)); | 2125 | WREG32(PCIE_PORT_DATA, (v)); |
2108 | (void)RREG32(PCIE_PORT_DATA); | 2126 | (void)RREG32(PCIE_PORT_DATA); |
2127 | spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags); | ||
2109 | } | 2128 | } |
2110 | 2129 | ||
2111 | /* | 2130 | /* |
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index fa0de46fcc0d..e65f211a7be0 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -1219,30 +1219,20 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) | |||
1219 | 1219 | ||
1220 | void r600_free_extended_power_table(struct radeon_device *rdev) | 1220 | void r600_free_extended_power_table(struct radeon_device *rdev) |
1221 | { | 1221 | { |
1222 | if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries) | 1222 | struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state; |
1223 | kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); | 1223 | |
1224 | if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) | 1224 | kfree(dyn_state->vddc_dependency_on_sclk.entries); |
1225 | kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); | 1225 | kfree(dyn_state->vddci_dependency_on_mclk.entries); |
1226 | if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) | 1226 | kfree(dyn_state->vddc_dependency_on_mclk.entries); |
1227 | kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); | 1227 | kfree(dyn_state->mvdd_dependency_on_mclk.entries); |
1228 | if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) | 1228 | kfree(dyn_state->cac_leakage_table.entries); |
1229 | kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries); | 1229 | kfree(dyn_state->phase_shedding_limits_table.entries); |
1230 | if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) | 1230 | kfree(dyn_state->ppm_table); |
1231 | kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries); | 1231 | kfree(dyn_state->cac_tdp_table); |
1232 | if (rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) | 1232 | kfree(dyn_state->vce_clock_voltage_dependency_table.entries); |
1233 | kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries); | 1233 | kfree(dyn_state->uvd_clock_voltage_dependency_table.entries); |
1234 | if (rdev->pm.dpm.dyn_state.ppm_table) | 1234 | kfree(dyn_state->samu_clock_voltage_dependency_table.entries); |
1235 | kfree(rdev->pm.dpm.dyn_state.ppm_table); | 1235 | kfree(dyn_state->acp_clock_voltage_dependency_table.entries); |
1236 | if (rdev->pm.dpm.dyn_state.cac_tdp_table) | ||
1237 | kfree(rdev->pm.dpm.dyn_state.cac_tdp_table); | ||
1238 | if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) | ||
1239 | kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries); | ||
1240 | if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) | ||
1241 | kfree(rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries); | ||
1242 | if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) | ||
1243 | kfree(rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries); | ||
1244 | if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) | ||
1245 | kfree(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries); | ||
1246 | } | 1236 | } |
1247 | 1237 | ||
1248 | enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev, | 1238 | enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev, |
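The r600_free_extended_power_table() rewrite above drops the "if (ptr)" guards because kfree() is defined to ignore a NULL pointer, so unconditionally freeing every table pointer is equivalent and shorter. Standard C's free() has the same property, which this stand-alone snippet demonstrates; it is only an analogy for the kernel helper.

#include <stdlib.h>
#include <stdio.h>

int main(void)
{
	int *p = NULL;

	free(p);                 /* defined behaviour: does nothing for NULL */
	p = malloc(sizeof *p);
	free(p);                 /* normal case */
	printf("freeing NULL is safe\n");
	return 0;
}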
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index ff8b564ce2b2..a400ac1c4147 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -181,7 +181,7 @@ extern int radeon_aspm; | |||
181 | #define RADEON_CG_SUPPORT_HDP_MGCG (1 << 16) | 181 | #define RADEON_CG_SUPPORT_HDP_MGCG (1 << 16) |
182 | 182 | ||
183 | /* PG flags */ | 183 | /* PG flags */ |
184 | #define RADEON_PG_SUPPORT_GFX_CG (1 << 0) | 184 | #define RADEON_PG_SUPPORT_GFX_PG (1 << 0) |
185 | #define RADEON_PG_SUPPORT_GFX_SMG (1 << 1) | 185 | #define RADEON_PG_SUPPORT_GFX_SMG (1 << 1) |
186 | #define RADEON_PG_SUPPORT_GFX_DMG (1 << 2) | 186 | #define RADEON_PG_SUPPORT_GFX_DMG (1 << 2) |
187 | #define RADEON_PG_SUPPORT_UVD (1 << 3) | 187 | #define RADEON_PG_SUPPORT_UVD (1 << 3) |
@@ -1778,6 +1778,7 @@ struct radeon_asic { | |||
1778 | int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level); | 1778 | int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level); |
1779 | bool (*vblank_too_short)(struct radeon_device *rdev); | 1779 | bool (*vblank_too_short)(struct radeon_device *rdev); |
1780 | void (*powergate_uvd)(struct radeon_device *rdev, bool gate); | 1780 | void (*powergate_uvd)(struct radeon_device *rdev, bool gate); |
1781 | void (*enable_bapm)(struct radeon_device *rdev, bool enable); | ||
1781 | } dpm; | 1782 | } dpm; |
1782 | /* pageflipping */ | 1783 | /* pageflipping */ |
1783 | struct { | 1784 | struct { |
@@ -2110,6 +2111,28 @@ struct radeon_device { | |||
2110 | resource_size_t rmmio_size; | 2111 | resource_size_t rmmio_size; |
2111 | /* protects concurrent MM_INDEX/DATA based register access */ | 2112 | /* protects concurrent MM_INDEX/DATA based register access */ |
2112 | spinlock_t mmio_idx_lock; | 2113 | spinlock_t mmio_idx_lock; |
2114 | /* protects concurrent SMC based register access */ | ||
2115 | spinlock_t smc_idx_lock; | ||
2116 | /* protects concurrent PLL register access */ | ||
2117 | spinlock_t pll_idx_lock; | ||
2118 | /* protects concurrent MC register access */ | ||
2119 | spinlock_t mc_idx_lock; | ||
2120 | /* protects concurrent PCIE register access */ | ||
2121 | spinlock_t pcie_idx_lock; | ||
2122 | /* protects concurrent PCIE_PORT register access */ | ||
2123 | spinlock_t pciep_idx_lock; | ||
2124 | /* protects concurrent PIF register access */ | ||
2125 | spinlock_t pif_idx_lock; | ||
2126 | /* protects concurrent CG register access */ | ||
2127 | spinlock_t cg_idx_lock; | ||
2128 | /* protects concurrent UVD register access */ | ||
2129 | spinlock_t uvd_idx_lock; | ||
2130 | /* protects concurrent RCU register access */ | ||
2131 | spinlock_t rcu_idx_lock; | ||
2132 | /* protects concurrent DIDT register access */ | ||
2133 | spinlock_t didt_idx_lock; | ||
2134 | /* protects concurrent ENDPOINT (audio) register access */ | ||
2135 | spinlock_t end_idx_lock; | ||
2113 | void __iomem *rmmio; | 2136 | void __iomem *rmmio; |
2114 | radeon_rreg_t mc_rreg; | 2137 | radeon_rreg_t mc_rreg; |
2115 | radeon_wreg_t mc_wreg; | 2138 | radeon_wreg_t mc_wreg; |
@@ -2277,123 +2300,179 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v); | |||
2277 | */ | 2300 | */ |
2278 | static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg) | 2301 | static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg) |
2279 | { | 2302 | { |
2303 | unsigned long flags; | ||
2280 | uint32_t r; | 2304 | uint32_t r; |
2281 | 2305 | ||
2306 | spin_lock_irqsave(&rdev->pcie_idx_lock, flags); | ||
2282 | WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); | 2307 | WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); |
2283 | r = RREG32(RADEON_PCIE_DATA); | 2308 | r = RREG32(RADEON_PCIE_DATA); |
2309 | spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags); | ||
2284 | return r; | 2310 | return r; |
2285 | } | 2311 | } |
2286 | 2312 | ||
2287 | static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | 2313 | static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
2288 | { | 2314 | { |
2315 | unsigned long flags; | ||
2316 | |||
2317 | spin_lock_irqsave(&rdev->pcie_idx_lock, flags); | ||
2289 | WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); | 2318 | WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); |
2290 | WREG32(RADEON_PCIE_DATA, (v)); | 2319 | WREG32(RADEON_PCIE_DATA, (v)); |
2320 | spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags); | ||
2291 | } | 2321 | } |
2292 | 2322 | ||
2293 | static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg) | 2323 | static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg) |
2294 | { | 2324 | { |
2325 | unsigned long flags; | ||
2295 | u32 r; | 2326 | u32 r; |
2296 | 2327 | ||
2328 | spin_lock_irqsave(&rdev->smc_idx_lock, flags); | ||
2297 | WREG32(TN_SMC_IND_INDEX_0, (reg)); | 2329 | WREG32(TN_SMC_IND_INDEX_0, (reg)); |
2298 | r = RREG32(TN_SMC_IND_DATA_0); | 2330 | r = RREG32(TN_SMC_IND_DATA_0); |
2331 | spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); | ||
2299 | return r; | 2332 | return r; |
2300 | } | 2333 | } |
2301 | 2334 | ||
2302 | static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v) | 2335 | static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
2303 | { | 2336 | { |
2337 | unsigned long flags; | ||
2338 | |||
2339 | spin_lock_irqsave(&rdev->smc_idx_lock, flags); | ||
2304 | WREG32(TN_SMC_IND_INDEX_0, (reg)); | 2340 | WREG32(TN_SMC_IND_INDEX_0, (reg)); |
2305 | WREG32(TN_SMC_IND_DATA_0, (v)); | 2341 | WREG32(TN_SMC_IND_DATA_0, (v)); |
2342 | spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); | ||
2306 | } | 2343 | } |
2307 | 2344 | ||
2308 | static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg) | 2345 | static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg) |
2309 | { | 2346 | { |
2347 | unsigned long flags; | ||
2310 | u32 r; | 2348 | u32 r; |
2311 | 2349 | ||
2350 | spin_lock_irqsave(&rdev->rcu_idx_lock, flags); | ||
2312 | WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); | 2351 | WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); |
2313 | r = RREG32(R600_RCU_DATA); | 2352 | r = RREG32(R600_RCU_DATA); |
2353 | spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags); | ||
2314 | return r; | 2354 | return r; |
2315 | } | 2355 | } |
2316 | 2356 | ||
2317 | static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v) | 2357 | static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
2318 | { | 2358 | { |
2359 | unsigned long flags; | ||
2360 | |||
2361 | spin_lock_irqsave(&rdev->rcu_idx_lock, flags); | ||
2319 | WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); | 2362 | WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); |
2320 | WREG32(R600_RCU_DATA, (v)); | 2363 | WREG32(R600_RCU_DATA, (v)); |
2364 | spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags); | ||
2321 | } | 2365 | } |
2322 | 2366 | ||
2323 | static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg) | 2367 | static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg) |
2324 | { | 2368 | { |
2369 | unsigned long flags; | ||
2325 | u32 r; | 2370 | u32 r; |
2326 | 2371 | ||
2372 | spin_lock_irqsave(&rdev->cg_idx_lock, flags); | ||
2327 | WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); | 2373 | WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); |
2328 | r = RREG32(EVERGREEN_CG_IND_DATA); | 2374 | r = RREG32(EVERGREEN_CG_IND_DATA); |
2375 | spin_unlock_irqrestore(&rdev->cg_idx_lock, flags); | ||
2329 | return r; | 2376 | return r; |
2330 | } | 2377 | } |
2331 | 2378 | ||
2332 | static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v) | 2379 | static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
2333 | { | 2380 | { |
2381 | unsigned long flags; | ||
2382 | |||
2383 | spin_lock_irqsave(&rdev->cg_idx_lock, flags); | ||
2334 | WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); | 2384 | WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); |
2335 | WREG32(EVERGREEN_CG_IND_DATA, (v)); | 2385 | WREG32(EVERGREEN_CG_IND_DATA, (v)); |
2386 | spin_unlock_irqrestore(&rdev->cg_idx_lock, flags); | ||
2336 | } | 2387 | } |
2337 | 2388 | ||
2338 | static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg) | 2389 | static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg) |
2339 | { | 2390 | { |
2391 | unsigned long flags; | ||
2340 | u32 r; | 2392 | u32 r; |
2341 | 2393 | ||
2394 | spin_lock_irqsave(&rdev->pif_idx_lock, flags); | ||
2342 | WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); | 2395 | WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); |
2343 | r = RREG32(EVERGREEN_PIF_PHY0_DATA); | 2396 | r = RREG32(EVERGREEN_PIF_PHY0_DATA); |
2397 | spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); | ||
2344 | return r; | 2398 | return r; |
2345 | } | 2399 | } |
2346 | 2400 | ||
2347 | static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v) | 2401 | static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
2348 | { | 2402 | { |
2403 | unsigned long flags; | ||
2404 | |||
2405 | spin_lock_irqsave(&rdev->pif_idx_lock, flags); | ||
2349 | WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); | 2406 | WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); |
2350 | WREG32(EVERGREEN_PIF_PHY0_DATA, (v)); | 2407 | WREG32(EVERGREEN_PIF_PHY0_DATA, (v)); |
2408 | spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); | ||
2351 | } | 2409 | } |
2352 | 2410 | ||
2353 | static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg) | 2411 | static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg) |
2354 | { | 2412 | { |
2413 | unsigned long flags; | ||
2355 | u32 r; | 2414 | u32 r; |
2356 | 2415 | ||
2416 | spin_lock_irqsave(&rdev->pif_idx_lock, flags); | ||
2357 | WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); | 2417 | WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); |
2358 | r = RREG32(EVERGREEN_PIF_PHY1_DATA); | 2418 | r = RREG32(EVERGREEN_PIF_PHY1_DATA); |
2419 | spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); | ||
2359 | return r; | 2420 | return r; |
2360 | } | 2421 | } |
2361 | 2422 | ||
2362 | static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v) | 2423 | static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
2363 | { | 2424 | { |
2425 | unsigned long flags; | ||
2426 | |||
2427 | spin_lock_irqsave(&rdev->pif_idx_lock, flags); | ||
2364 | WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); | 2428 | WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); |
2365 | WREG32(EVERGREEN_PIF_PHY1_DATA, (v)); | 2429 | WREG32(EVERGREEN_PIF_PHY1_DATA, (v)); |
2430 | spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); | ||
2366 | } | 2431 | } |
2367 | 2432 | ||
2368 | static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg) | 2433 | static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg) |
2369 | { | 2434 | { |
2435 | unsigned long flags; | ||
2370 | u32 r; | 2436 | u32 r; |
2371 | 2437 | ||
2438 | spin_lock_irqsave(&rdev->uvd_idx_lock, flags); | ||
2372 | WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); | 2439 | WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); |
2373 | r = RREG32(R600_UVD_CTX_DATA); | 2440 | r = RREG32(R600_UVD_CTX_DATA); |
2441 | spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags); | ||
2374 | return r; | 2442 | return r; |
2375 | } | 2443 | } |
2376 | 2444 | ||
2377 | static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v) | 2445 | static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
2378 | { | 2446 | { |
2447 | unsigned long flags; | ||
2448 | |||
2449 | spin_lock_irqsave(&rdev->uvd_idx_lock, flags); | ||
2379 | WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); | 2450 | WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); |
2380 | WREG32(R600_UVD_CTX_DATA, (v)); | 2451 | WREG32(R600_UVD_CTX_DATA, (v)); |
2452 | spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags); | ||
2381 | } | 2453 | } |
2382 | 2454 | ||
2383 | 2455 | ||
2384 | static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg) | 2456 | static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg) |
2385 | { | 2457 | { |
2458 | unsigned long flags; | ||
2386 | u32 r; | 2459 | u32 r; |
2387 | 2460 | ||
2461 | spin_lock_irqsave(&rdev->didt_idx_lock, flags); | ||
2388 | WREG32(CIK_DIDT_IND_INDEX, (reg)); | 2462 | WREG32(CIK_DIDT_IND_INDEX, (reg)); |
2389 | r = RREG32(CIK_DIDT_IND_DATA); | 2463 | r = RREG32(CIK_DIDT_IND_DATA); |
2464 | spin_unlock_irqrestore(&rdev->didt_idx_lock, flags); | ||
2390 | return r; | 2465 | return r; |
2391 | } | 2466 | } |
2392 | 2467 | ||
2393 | static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v) | 2468 | static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
2394 | { | 2469 | { |
2470 | unsigned long flags; | ||
2471 | |||
2472 | spin_lock_irqsave(&rdev->didt_idx_lock, flags); | ||
2395 | WREG32(CIK_DIDT_IND_INDEX, (reg)); | 2473 | WREG32(CIK_DIDT_IND_INDEX, (reg)); |
2396 | WREG32(CIK_DIDT_IND_DATA, (v)); | 2474 | WREG32(CIK_DIDT_IND_DATA, (v)); |
2475 | spin_unlock_irqrestore(&rdev->didt_idx_lock, flags); | ||
2397 | } | 2476 | } |
2398 | 2477 | ||
2399 | void r100_pll_errata_after_index(struct radeon_device *rdev); | 2478 | void r100_pll_errata_after_index(struct radeon_device *rdev); |
@@ -2569,6 +2648,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v); | |||
2569 | #define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l)) | 2648 | #define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l)) |
2570 | #define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev)) | 2649 | #define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev)) |
2571 | #define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g)) | 2650 | #define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g)) |
2651 | #define radeon_dpm_enable_bapm(rdev, e) rdev->asic->dpm.enable_bapm((rdev), (e)) | ||
2572 | 2652 | ||
2573 | /* Common functions */ | 2653 | /* Common functions */ |
2574 | /* AGP */ | 2654 | /* AGP */ |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 630853b96841..5003385a7512 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -1037,6 +1037,7 @@ static struct radeon_asic rv6xx_asic = { | |||
1037 | .set_pcie_lanes = &r600_set_pcie_lanes, | 1037 | .set_pcie_lanes = &r600_set_pcie_lanes, |
1038 | .set_clock_gating = NULL, | 1038 | .set_clock_gating = NULL, |
1039 | .get_temperature = &rv6xx_get_temp, | 1039 | .get_temperature = &rv6xx_get_temp, |
1040 | .set_uvd_clocks = &r600_set_uvd_clocks, | ||
1040 | }, | 1041 | }, |
1041 | .dpm = { | 1042 | .dpm = { |
1042 | .init = &rv6xx_dpm_init, | 1043 | .init = &rv6xx_dpm_init, |
@@ -1126,6 +1127,7 @@ static struct radeon_asic rs780_asic = { | |||
1126 | .set_pcie_lanes = NULL, | 1127 | .set_pcie_lanes = NULL, |
1127 | .set_clock_gating = NULL, | 1128 | .set_clock_gating = NULL, |
1128 | .get_temperature = &rv6xx_get_temp, | 1129 | .get_temperature = &rv6xx_get_temp, |
1130 | .set_uvd_clocks = &r600_set_uvd_clocks, | ||
1129 | }, | 1131 | }, |
1130 | .dpm = { | 1132 | .dpm = { |
1131 | .init = &rs780_dpm_init, | 1133 | .init = &rs780_dpm_init, |
@@ -1141,6 +1143,7 @@ static struct radeon_asic rs780_asic = { | |||
1141 | .get_mclk = &rs780_dpm_get_mclk, | 1143 | .get_mclk = &rs780_dpm_get_mclk, |
1142 | .print_power_state = &rs780_dpm_print_power_state, | 1144 | .print_power_state = &rs780_dpm_print_power_state, |
1143 | .debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level, | 1145 | .debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level, |
1146 | .force_performance_level = &rs780_dpm_force_performance_level, | ||
1144 | }, | 1147 | }, |
1145 | .pflip = { | 1148 | .pflip = { |
1146 | .pre_page_flip = &rs600_pre_page_flip, | 1149 | .pre_page_flip = &rs600_pre_page_flip, |
@@ -1791,6 +1794,7 @@ static struct radeon_asic trinity_asic = { | |||
1791 | .print_power_state = &trinity_dpm_print_power_state, | 1794 | .print_power_state = &trinity_dpm_print_power_state, |
1792 | .debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level, | 1795 | .debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level, |
1793 | .force_performance_level = &trinity_dpm_force_performance_level, | 1796 | .force_performance_level = &trinity_dpm_force_performance_level, |
1797 | .enable_bapm = &trinity_dpm_enable_bapm, | ||
1794 | }, | 1798 | }, |
1795 | .pflip = { | 1799 | .pflip = { |
1796 | .pre_page_flip = &evergreen_pre_page_flip, | 1800 | .pre_page_flip = &evergreen_pre_page_flip, |
@@ -2166,6 +2170,7 @@ static struct radeon_asic kv_asic = { | |||
2166 | .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level, | 2170 | .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level, |
2167 | .force_performance_level = &kv_dpm_force_performance_level, | 2171 | .force_performance_level = &kv_dpm_force_performance_level, |
2168 | .powergate_uvd = &kv_dpm_powergate_uvd, | 2172 | .powergate_uvd = &kv_dpm_powergate_uvd, |
2173 | .enable_bapm = &kv_dpm_enable_bapm, | ||
2169 | }, | 2174 | }, |
2170 | .pflip = { | 2175 | .pflip = { |
2171 | .pre_page_flip = &evergreen_pre_page_flip, | 2176 | .pre_page_flip = &evergreen_pre_page_flip, |
@@ -2390,7 +2395,7 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
2390 | RADEON_CG_SUPPORT_HDP_LS | | 2395 | RADEON_CG_SUPPORT_HDP_LS | |
2391 | RADEON_CG_SUPPORT_HDP_MGCG; | 2396 | RADEON_CG_SUPPORT_HDP_MGCG; |
2392 | rdev->pg_flags = 0 | | 2397 | rdev->pg_flags = 0 | |
2393 | /*RADEON_PG_SUPPORT_GFX_CG | */ | 2398 | /*RADEON_PG_SUPPORT_GFX_PG | */ |
2394 | RADEON_PG_SUPPORT_SDMA; | 2399 | RADEON_PG_SUPPORT_SDMA; |
2395 | break; | 2400 | break; |
2396 | case CHIP_OLAND: | 2401 | case CHIP_OLAND: |
@@ -2479,7 +2484,7 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
2479 | RADEON_CG_SUPPORT_HDP_LS | | 2484 | RADEON_CG_SUPPORT_HDP_LS | |
2480 | RADEON_CG_SUPPORT_HDP_MGCG; | 2485 | RADEON_CG_SUPPORT_HDP_MGCG; |
2481 | rdev->pg_flags = 0; | 2486 | rdev->pg_flags = 0; |
2482 | /*RADEON_PG_SUPPORT_GFX_CG | | 2487 | /*RADEON_PG_SUPPORT_GFX_PG | |
2483 | RADEON_PG_SUPPORT_GFX_SMG | | 2488 | RADEON_PG_SUPPORT_GFX_SMG | |
2484 | RADEON_PG_SUPPORT_GFX_DMG | | 2489 | RADEON_PG_SUPPORT_GFX_DMG | |
2485 | RADEON_PG_SUPPORT_UVD | | 2490 | RADEON_PG_SUPPORT_UVD | |
@@ -2507,7 +2512,7 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
2507 | RADEON_CG_SUPPORT_HDP_LS | | 2512 | RADEON_CG_SUPPORT_HDP_LS | |
2508 | RADEON_CG_SUPPORT_HDP_MGCG; | 2513 | RADEON_CG_SUPPORT_HDP_MGCG; |
2509 | rdev->pg_flags = 0; | 2514 | rdev->pg_flags = 0; |
2510 | /*RADEON_PG_SUPPORT_GFX_CG | | 2515 | /*RADEON_PG_SUPPORT_GFX_PG | |
2511 | RADEON_PG_SUPPORT_GFX_SMG | | 2516 | RADEON_PG_SUPPORT_GFX_SMG | |
2512 | RADEON_PG_SUPPORT_UVD | | 2517 | RADEON_PG_SUPPORT_UVD | |
2513 | RADEON_PG_SUPPORT_VCE | | 2518 | RADEON_PG_SUPPORT_VCE | |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 818bbe6b884b..70c29d5e080d 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -389,6 +389,7 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev); | |||
389 | u32 r600_get_xclk(struct radeon_device *rdev); | 389 | u32 r600_get_xclk(struct radeon_device *rdev); |
390 | uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev); | 390 | uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev); |
391 | int rv6xx_get_temp(struct radeon_device *rdev); | 391 | int rv6xx_get_temp(struct radeon_device *rdev); |
392 | int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); | ||
392 | int r600_dpm_pre_set_power_state(struct radeon_device *rdev); | 393 | int r600_dpm_pre_set_power_state(struct radeon_device *rdev); |
393 | void r600_dpm_post_set_power_state(struct radeon_device *rdev); | 394 | void r600_dpm_post_set_power_state(struct radeon_device *rdev); |
394 | /* r600 dma */ | 395 | /* r600 dma */ |
@@ -428,6 +429,8 @@ void rs780_dpm_print_power_state(struct radeon_device *rdev, | |||
428 | struct radeon_ps *ps); | 429 | struct radeon_ps *ps); |
429 | void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, | 430 | void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, |
430 | struct seq_file *m); | 431 | struct seq_file *m); |
432 | int rs780_dpm_force_performance_level(struct radeon_device *rdev, | ||
433 | enum radeon_dpm_forced_level level); | ||
431 | 434 | ||
432 | /* | 435 | /* |
433 | * rv770,rv730,rv710,rv740 | 436 | * rv770,rv730,rv710,rv740 |
@@ -625,6 +628,7 @@ void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *r | |||
625 | struct seq_file *m); | 628 | struct seq_file *m); |
626 | int trinity_dpm_force_performance_level(struct radeon_device *rdev, | 629 | int trinity_dpm_force_performance_level(struct radeon_device *rdev, |
627 | enum radeon_dpm_forced_level level); | 630 | enum radeon_dpm_forced_level level); |
631 | void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable); | ||
628 | 632 | ||
629 | /* DCE6 - SI */ | 633 | /* DCE6 - SI */ |
630 | void dce6_bandwidth_update(struct radeon_device *rdev); | 634 | void dce6_bandwidth_update(struct radeon_device *rdev); |
@@ -781,6 +785,7 @@ void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, | |||
781 | int kv_dpm_force_performance_level(struct radeon_device *rdev, | 785 | int kv_dpm_force_performance_level(struct radeon_device *rdev, |
782 | enum radeon_dpm_forced_level level); | 786 | enum radeon_dpm_forced_level level); |
783 | void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate); | 787 | void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate); |
788 | void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable); | ||
784 | 789 | ||
785 | /* uvd v1.0 */ | 790 | /* uvd v1.0 */ |
786 | uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev, | 791 | uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev, |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 2399f25ec037..cbbdc8500881 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -396,6 +396,21 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct | |||
396 | } | 396 | } |
397 | } | 397 | } |
398 | 398 | ||
399 | if (property == rdev->mode_info.audio_property) { | ||
400 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
401 | /* need to find digital encoder on connector */ | ||
402 | encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); | ||
403 | if (!encoder) | ||
404 | return 0; | ||
405 | |||
406 | radeon_encoder = to_radeon_encoder(encoder); | ||
407 | |||
408 | if (radeon_connector->audio != val) { | ||
409 | radeon_connector->audio = val; | ||
410 | radeon_property_change_mode(&radeon_encoder->base); | ||
411 | } | ||
412 | } | ||
413 | |||
399 | if (property == rdev->mode_info.underscan_property) { | 414 | if (property == rdev->mode_info.underscan_property) { |
400 | /* need to find digital encoder on connector */ | 415 | /* need to find digital encoder on connector */ |
401 | encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); | 416 | encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); |
@@ -1619,6 +1634,9 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1619 | drm_object_attach_property(&radeon_connector->base.base, | 1634 | drm_object_attach_property(&radeon_connector->base.base, |
1620 | rdev->mode_info.underscan_vborder_property, | 1635 | rdev->mode_info.underscan_vborder_property, |
1621 | 0); | 1636 | 0); |
1637 | drm_object_attach_property(&radeon_connector->base.base, | ||
1638 | rdev->mode_info.audio_property, | ||
1639 | RADEON_AUDIO_DISABLE); | ||
1622 | subpixel_order = SubPixelHorizontalRGB; | 1640 | subpixel_order = SubPixelHorizontalRGB; |
1623 | connector->interlace_allowed = true; | 1641 | connector->interlace_allowed = true; |
1624 | if (connector_type == DRM_MODE_CONNECTOR_HDMIB) | 1642 | if (connector_type == DRM_MODE_CONNECTOR_HDMIB) |
@@ -1708,6 +1726,11 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1708 | rdev->mode_info.underscan_vborder_property, | 1726 | rdev->mode_info.underscan_vborder_property, |
1709 | 0); | 1727 | 0); |
1710 | } | 1728 | } |
1729 | if (ASIC_IS_DCE2(rdev)) { | ||
1730 | drm_object_attach_property(&radeon_connector->base.base, | ||
1731 | rdev->mode_info.audio_property, | ||
1732 | RADEON_AUDIO_DISABLE); | ||
1733 | } | ||
1711 | if (connector_type == DRM_MODE_CONNECTOR_DVII) { | 1734 | if (connector_type == DRM_MODE_CONNECTOR_DVII) { |
1712 | radeon_connector->dac_load_detect = true; | 1735 | radeon_connector->dac_load_detect = true; |
1713 | drm_object_attach_property(&radeon_connector->base.base, | 1736 | drm_object_attach_property(&radeon_connector->base.base, |
@@ -1748,6 +1771,11 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1748 | rdev->mode_info.underscan_vborder_property, | 1771 | rdev->mode_info.underscan_vborder_property, |
1749 | 0); | 1772 | 0); |
1750 | } | 1773 | } |
1774 | if (ASIC_IS_DCE2(rdev)) { | ||
1775 | drm_object_attach_property(&radeon_connector->base.base, | ||
1776 | rdev->mode_info.audio_property, | ||
1777 | RADEON_AUDIO_DISABLE); | ||
1778 | } | ||
1751 | subpixel_order = SubPixelHorizontalRGB; | 1779 | subpixel_order = SubPixelHorizontalRGB; |
1752 | connector->interlace_allowed = true; | 1780 | connector->interlace_allowed = true; |
1753 | if (connector_type == DRM_MODE_CONNECTOR_HDMIB) | 1781 | if (connector_type == DRM_MODE_CONNECTOR_HDMIB) |
@@ -1787,6 +1815,11 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1787 | rdev->mode_info.underscan_vborder_property, | 1815 | rdev->mode_info.underscan_vborder_property, |
1788 | 0); | 1816 | 0); |
1789 | } | 1817 | } |
1818 | if (ASIC_IS_DCE2(rdev)) { | ||
1819 | drm_object_attach_property(&radeon_connector->base.base, | ||
1820 | rdev->mode_info.audio_property, | ||
1821 | RADEON_AUDIO_DISABLE); | ||
1822 | } | ||
1790 | connector->interlace_allowed = true; | 1823 | connector->interlace_allowed = true; |
1791 | /* in theory with a DP to VGA converter... */ | 1824 | /* in theory with a DP to VGA converter... */ |
1792 | connector->doublescan_allowed = false; | 1825 | connector->doublescan_allowed = false; |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index a56084410372..27ea00489ecc 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <drm/radeon_drm.h> | 28 | #include <drm/radeon_drm.h> |
29 | #include "radeon_reg.h" | 29 | #include "radeon_reg.h" |
30 | #include "radeon.h" | 30 | #include "radeon.h" |
31 | #include "radeon_trace.h" | ||
31 | 32 | ||
32 | static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) | 33 | static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) |
33 | { | 34 | { |
@@ -559,6 +560,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
559 | return r; | 560 | return r; |
560 | } | 561 | } |
561 | 562 | ||
563 | trace_radeon_cs(&parser); | ||
564 | |||
562 | r = radeon_cs_ib_chunk(rdev, &parser); | 565 | r = radeon_cs_ib_chunk(rdev, &parser); |
563 | if (r) { | 566 | if (r) { |
564 | goto out; | 567 | goto out; |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 16cb8792b1e6..e29faa73b574 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -1249,6 +1249,17 @@ int radeon_device_init(struct radeon_device *rdev, | |||
1249 | /* Registers mapping */ | 1249 | /* Registers mapping */ |
1250 | /* TODO: block userspace mapping of io register */ | 1250 | /* TODO: block userspace mapping of io register */ |
1251 | spin_lock_init(&rdev->mmio_idx_lock); | 1251 | spin_lock_init(&rdev->mmio_idx_lock); |
1252 | spin_lock_init(&rdev->smc_idx_lock); | ||
1253 | spin_lock_init(&rdev->pll_idx_lock); | ||
1254 | spin_lock_init(&rdev->mc_idx_lock); | ||
1255 | spin_lock_init(&rdev->pcie_idx_lock); | ||
1256 | spin_lock_init(&rdev->pciep_idx_lock); | ||
1257 | spin_lock_init(&rdev->pif_idx_lock); | ||
1258 | spin_lock_init(&rdev->cg_idx_lock); | ||
1259 | spin_lock_init(&rdev->uvd_idx_lock); | ||
1260 | spin_lock_init(&rdev->rcu_idx_lock); | ||
1261 | spin_lock_init(&rdev->didt_idx_lock); | ||
1262 | spin_lock_init(&rdev->end_idx_lock); | ||
1252 | if (rdev->family >= CHIP_BONAIRE) { | 1263 | if (rdev->family >= CHIP_BONAIRE) { |
1253 | rdev->rmmio_base = pci_resource_start(rdev->pdev, 5); | 1264 | rdev->rmmio_base = pci_resource_start(rdev->pdev, 5); |
1254 | rdev->rmmio_size = pci_resource_len(rdev->pdev, 5); | 1265 | rdev->rmmio_size = pci_resource_len(rdev->pdev, 5); |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index b055bddaa94c..0d1aa050d41d 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -1172,6 +1172,12 @@ static struct drm_prop_enum_list radeon_underscan_enum_list[] = | |||
1172 | { UNDERSCAN_AUTO, "auto" }, | 1172 | { UNDERSCAN_AUTO, "auto" }, |
1173 | }; | 1173 | }; |
1174 | 1174 | ||
1175 | static struct drm_prop_enum_list radeon_audio_enum_list[] = | ||
1176 | { { RADEON_AUDIO_DISABLE, "off" }, | ||
1177 | { RADEON_AUDIO_ENABLE, "on" }, | ||
1178 | { RADEON_AUDIO_AUTO, "auto" }, | ||
1179 | }; | ||
1180 | |||
1175 | static int radeon_modeset_create_props(struct radeon_device *rdev) | 1181 | static int radeon_modeset_create_props(struct radeon_device *rdev) |
1176 | { | 1182 | { |
1177 | int sz; | 1183 | int sz; |
@@ -1222,6 +1228,12 @@ static int radeon_modeset_create_props(struct radeon_device *rdev) | |||
1222 | if (!rdev->mode_info.underscan_vborder_property) | 1228 | if (!rdev->mode_info.underscan_vborder_property) |
1223 | return -ENOMEM; | 1229 | return -ENOMEM; |
1224 | 1230 | ||
1231 | sz = ARRAY_SIZE(radeon_audio_enum_list); | ||
1232 | rdev->mode_info.audio_property = | ||
1233 | drm_property_create_enum(rdev->ddev, 0, | ||
1234 | "audio", | ||
1235 | radeon_audio_enum_list, sz); | ||
1236 | |||
1225 | return 0; | 1237 | return 0; |
1226 | } | 1238 | } |
1227 | 1239 | ||
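The radeon_display.c hunks above define the off/on/auto values and create the "audio" DRM enum property; the radeon_connectors.c changes earlier attach it per connector and handle updates. A condensed sketch of the create-then-attach sequence stitched together from those hunks — the local prop variable and the NULL check are illustrative additions, not shown in the diff:

static const struct drm_prop_enum_list audio_enum_list[] = {
	{ RADEON_AUDIO_DISABLE, "off"  },
	{ RADEON_AUDIO_ENABLE,  "on"   },
	{ RADEON_AUDIO_AUTO,    "auto" },
};

/* once per device, in radeon_modeset_create_props() */
prop = drm_property_create_enum(rdev->ddev, 0, "audio",
				audio_enum_list, ARRAY_SIZE(audio_enum_list));
if (!prop)
	return -ENOMEM;
rdev->mode_info.audio_property = prop;

/* per HDMI/DVI/DP connector, in radeon_add_atom_connector() */
drm_object_attach_property(&radeon_connector->base.base,
			   rdev->mode_info.audio_property,
			   RADEON_AUDIO_DISABLE);

When userspace changes the property (for instance through the RandR output-property interface), the new value lands in radeon_connector_set_property() shown earlier, which stores it in radeon_connector->audio and forces a mode change only when the value actually differs.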
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index cb4445f55a96..cdd12dcd988b 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -153,7 +153,7 @@ int radeon_benchmarking = 0; | |||
153 | int radeon_testing = 0; | 153 | int radeon_testing = 0; |
154 | int radeon_connector_table = 0; | 154 | int radeon_connector_table = 0; |
155 | int radeon_tv = 1; | 155 | int radeon_tv = 1; |
156 | int radeon_audio = 0; | 156 | int radeon_audio = 1; |
157 | int radeon_disp_priority = 0; | 157 | int radeon_disp_priority = 0; |
158 | int radeon_hw_i2c = 0; | 158 | int radeon_hw_i2c = 0; |
159 | int radeon_pcie_gen2 = -1; | 159 | int radeon_pcie_gen2 = -1; |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index d908d8d68f6b..ef63d3f00b2f 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -247,6 +247,8 @@ struct radeon_mode_info { | |||
247 | struct drm_property *underscan_property; | 247 | struct drm_property *underscan_property; |
248 | struct drm_property *underscan_hborder_property; | 248 | struct drm_property *underscan_hborder_property; |
249 | struct drm_property *underscan_vborder_property; | 249 | struct drm_property *underscan_vborder_property; |
250 | /* audio */ | ||
251 | struct drm_property *audio_property; | ||
250 | /* hardcoded DFP edid from BIOS */ | 252 | /* hardcoded DFP edid from BIOS */ |
251 | struct edid *bios_hardcoded_edid; | 253 | struct edid *bios_hardcoded_edid; |
252 | int bios_hardcoded_edid_size; | 254 | int bios_hardcoded_edid_size; |
@@ -471,6 +473,12 @@ struct radeon_router { | |||
471 | u8 cd_mux_state; | 473 | u8 cd_mux_state; |
472 | }; | 474 | }; |
473 | 475 | ||
476 | enum radeon_connector_audio { | ||
477 | RADEON_AUDIO_DISABLE = 0, | ||
478 | RADEON_AUDIO_ENABLE = 1, | ||
479 | RADEON_AUDIO_AUTO = 2 | ||
480 | }; | ||
481 | |||
474 | struct radeon_connector { | 482 | struct radeon_connector { |
475 | struct drm_connector base; | 483 | struct drm_connector base; |
476 | uint32_t connector_id; | 484 | uint32_t connector_id; |
@@ -489,6 +497,7 @@ struct radeon_connector { | |||
489 | struct radeon_hpd hpd; | 497 | struct radeon_hpd hpd; |
490 | struct radeon_router router; | 498 | struct radeon_router router; |
491 | struct radeon_i2c_chan *router_bus; | 499 | struct radeon_i2c_chan *router_bus; |
500 | enum radeon_connector_audio audio; | ||
492 | }; | 501 | }; |
493 | 502 | ||
494 | struct radeon_framebuffer { | 503 | struct radeon_framebuffer { |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index d7555369a3e5..d41ac8a4224d 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -67,7 +67,16 @@ int radeon_pm_get_type_index(struct radeon_device *rdev, | |||
67 | 67 | ||
68 | void radeon_pm_acpi_event_handler(struct radeon_device *rdev) | 68 | void radeon_pm_acpi_event_handler(struct radeon_device *rdev) |
69 | { | 69 | { |
70 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { | 70 | if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { |
71 | mutex_lock(&rdev->pm.mutex); | ||
72 | if (power_supply_is_system_supplied() > 0) | ||
73 | rdev->pm.dpm.ac_power = true; | ||
74 | else | ||
75 | rdev->pm.dpm.ac_power = false; | ||
76 | if (rdev->asic->dpm.enable_bapm) | ||
77 | radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power); | ||
78 | mutex_unlock(&rdev->pm.mutex); | ||
79 | } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) { | ||
71 | if (rdev->pm.profile == PM_PROFILE_AUTO) { | 80 | if (rdev->pm.profile == PM_PROFILE_AUTO) { |
72 | mutex_lock(&rdev->pm.mutex); | 81 | mutex_lock(&rdev->pm.mutex); |
73 | radeon_pm_update_profile(rdev); | 82 | radeon_pm_update_profile(rdev); |
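The handler now checks for DPM first: when DPM is active it refreshes dpm.ac_power from power_supply_is_system_supplied() and, if the ASIC provides the new callback, forwards that state through radeon_dpm_enable_bapm(). Regarding the helper's semantics (an assumption about the power-supply core, not something read from this diff): a positive return means at least one mains-type supply is online, and it also falls back to "supplied" when no supply devices are registered at all, e.g. on desktops, while zero means the machine is running on battery.

	/* condensed restatement of the hunk above */
	rdev->pm.dpm.ac_power = power_supply_is_system_supplied() > 0;
	if (rdev->asic->dpm.enable_bapm)
		radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);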
@@ -333,7 +342,7 @@ static ssize_t radeon_get_pm_profile(struct device *dev, | |||
333 | struct device_attribute *attr, | 342 | struct device_attribute *attr, |
334 | char *buf) | 343 | char *buf) |
335 | { | 344 | { |
336 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); | 345 | struct drm_device *ddev = dev_get_drvdata(dev); |
337 | struct radeon_device *rdev = ddev->dev_private; | 346 | struct radeon_device *rdev = ddev->dev_private; |
338 | int cp = rdev->pm.profile; | 347 | int cp = rdev->pm.profile; |
339 | 348 | ||
@@ -349,7 +358,7 @@ static ssize_t radeon_set_pm_profile(struct device *dev, | |||
349 | const char *buf, | 358 | const char *buf, |
350 | size_t count) | 359 | size_t count) |
351 | { | 360 | { |
352 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); | 361 | struct drm_device *ddev = dev_get_drvdata(dev); |
353 | struct radeon_device *rdev = ddev->dev_private; | 362 | struct radeon_device *rdev = ddev->dev_private; |
354 | 363 | ||
355 | mutex_lock(&rdev->pm.mutex); | 364 | mutex_lock(&rdev->pm.mutex); |
@@ -383,7 +392,7 @@ static ssize_t radeon_get_pm_method(struct device *dev, | |||
383 | struct device_attribute *attr, | 392 | struct device_attribute *attr, |
384 | char *buf) | 393 | char *buf) |
385 | { | 394 | { |
386 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); | 395 | struct drm_device *ddev = dev_get_drvdata(dev); |
387 | struct radeon_device *rdev = ddev->dev_private; | 396 | struct radeon_device *rdev = ddev->dev_private; |
388 | int pm = rdev->pm.pm_method; | 397 | int pm = rdev->pm.pm_method; |
389 | 398 | ||
@@ -397,7 +406,7 @@ static ssize_t radeon_set_pm_method(struct device *dev, | |||
397 | const char *buf, | 406 | const char *buf, |
398 | size_t count) | 407 | size_t count) |
399 | { | 408 | { |
400 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); | 409 | struct drm_device *ddev = dev_get_drvdata(dev); |
401 | struct radeon_device *rdev = ddev->dev_private; | 410 | struct radeon_device *rdev = ddev->dev_private; |
402 | 411 | ||
403 | /* we don't support the legacy modes with dpm */ | 412 | /* we don't support the legacy modes with dpm */ |
@@ -433,7 +442,7 @@ static ssize_t radeon_get_dpm_state(struct device *dev, | |||
433 | struct device_attribute *attr, | 442 | struct device_attribute *attr, |
434 | char *buf) | 443 | char *buf) |
435 | { | 444 | { |
436 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); | 445 | struct drm_device *ddev = dev_get_drvdata(dev); |
437 | struct radeon_device *rdev = ddev->dev_private; | 446 | struct radeon_device *rdev = ddev->dev_private; |
438 | enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; | 447 | enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; |
439 | 448 | ||
@@ -447,7 +456,7 @@ static ssize_t radeon_set_dpm_state(struct device *dev, | |||
447 | const char *buf, | 456 | const char *buf, |
448 | size_t count) | 457 | size_t count) |
449 | { | 458 | { |
450 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); | 459 | struct drm_device *ddev = dev_get_drvdata(dev); |
451 | struct radeon_device *rdev = ddev->dev_private; | 460 | struct radeon_device *rdev = ddev->dev_private; |
452 | 461 | ||
453 | mutex_lock(&rdev->pm.mutex); | 462 | mutex_lock(&rdev->pm.mutex); |
@@ -472,7 +481,7 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev, | |||
472 | struct device_attribute *attr, | 481 | struct device_attribute *attr, |
473 | char *buf) | 482 | char *buf) |
474 | { | 483 | { |
475 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); | 484 | struct drm_device *ddev = dev_get_drvdata(dev); |
476 | struct radeon_device *rdev = ddev->dev_private; | 485 | struct radeon_device *rdev = ddev->dev_private; |
477 | enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; | 486 | enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; |
478 | 487 | ||
@@ -486,7 +495,7 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev, | |||
486 | const char *buf, | 495 | const char *buf, |
487 | size_t count) | 496 | size_t count) |
488 | { | 497 | { |
489 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); | 498 | struct drm_device *ddev = dev_get_drvdata(dev); |
490 | struct radeon_device *rdev = ddev->dev_private; | 499 | struct radeon_device *rdev = ddev->dev_private; |
491 | enum radeon_dpm_forced_level level; | 500 | enum radeon_dpm_forced_level level; |
492 | int ret = 0; | 501 | int ret = 0; |
@@ -524,7 +533,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev, | |||
524 | struct device_attribute *attr, | 533 | struct device_attribute *attr, |
525 | char *buf) | 534 | char *buf) |
526 | { | 535 | { |
527 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); | 536 | struct drm_device *ddev = dev_get_drvdata(dev); |
528 | struct radeon_device *rdev = ddev->dev_private; | 537 | struct radeon_device *rdev = ddev->dev_private; |
529 | int temp; | 538 | int temp; |
530 | 539 | ||
@@ -536,6 +545,23 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev, | |||
536 | return snprintf(buf, PAGE_SIZE, "%d\n", temp); | 545 | return snprintf(buf, PAGE_SIZE, "%d\n", temp); |
537 | } | 546 | } |
538 | 547 | ||
548 | static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev, | ||
549 | struct device_attribute *attr, | ||
550 | char *buf) | ||
551 | { | ||
552 | struct drm_device *ddev = dev_get_drvdata(dev); | ||
553 | struct radeon_device *rdev = ddev->dev_private; | ||
554 | int hyst = to_sensor_dev_attr(attr)->index; | ||
555 | int temp; | ||
556 | |||
557 | if (hyst) | ||
558 | temp = rdev->pm.dpm.thermal.min_temp; | ||
559 | else | ||
560 | temp = rdev->pm.dpm.thermal.max_temp; | ||
561 | |||
562 | return snprintf(buf, PAGE_SIZE, "%d\n", temp); | ||
563 | } | ||
564 | |||
539 | static ssize_t radeon_hwmon_show_name(struct device *dev, | 565 | static ssize_t radeon_hwmon_show_name(struct device *dev, |
540 | struct device_attribute *attr, | 566 | struct device_attribute *attr, |
541 | char *buf) | 567 | char *buf) |
@@ -544,16 +570,37 @@ static ssize_t radeon_hwmon_show_name(struct device *dev, | |||
544 | } | 570 | } |
545 | 571 | ||
546 | static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); | 572 | static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); |
573 | static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0); | ||
574 | static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1); | ||
547 | static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0); | 575 | static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0); |
548 | 576 | ||
549 | static struct attribute *hwmon_attributes[] = { | 577 | static struct attribute *hwmon_attributes[] = { |
550 | &sensor_dev_attr_temp1_input.dev_attr.attr, | 578 | &sensor_dev_attr_temp1_input.dev_attr.attr, |
579 | &sensor_dev_attr_temp1_crit.dev_attr.attr, | ||
580 | &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, | ||
551 | &sensor_dev_attr_name.dev_attr.attr, | 581 | &sensor_dev_attr_name.dev_attr.attr, |
552 | NULL | 582 | NULL |
553 | }; | 583 | }; |
554 | 584 | ||
585 | static umode_t hwmon_attributes_visible(struct kobject *kobj, | ||
586 | struct attribute *attr, int index) | ||
587 | { | ||
588 | struct device *dev = container_of(kobj, struct device, kobj); | ||
589 | struct drm_device *ddev = dev_get_drvdata(dev); | ||
590 | struct radeon_device *rdev = ddev->dev_private; | ||
591 | |||
592 | /* Skip limit attributes if DPM is not enabled */ | ||
593 | if (rdev->pm.pm_method != PM_METHOD_DPM && | ||
594 | (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || | ||
595 | attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) | ||
596 | return 0; | ||
597 | |||
598 | return attr->mode; | ||
599 | } | ||
600 | |||
555 | static const struct attribute_group hwmon_attrgroup = { | 601 | static const struct attribute_group hwmon_attrgroup = { |
556 | .attrs = hwmon_attributes, | 602 | .attrs = hwmon_attributes, |
603 | .is_visible = hwmon_attributes_visible, | ||
557 | }; | 604 | }; |
558 | 605 | ||
559 | static int radeon_hwmon_init(struct radeon_device *rdev) | 606 | static int radeon_hwmon_init(struct radeon_device *rdev) |
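Rather than registering two different attribute groups, the hwmon hook-up keeps one group and adds an .is_visible callback: sysfs consults it once per attribute when the group is created, a return of 0 hides that attribute, and returning attr->mode exposes it with its declared permissions. Reduced to a sketch with illustrative names — my_has_limits() stands in for the PM_METHOD_DPM check above and is not a real radeon helper:

static umode_t my_hwmon_visible(struct kobject *kobj,
				struct attribute *attr, int index)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	/* index is the attribute's position in the .attrs array; the
	 * hunk above ignores it and compares attribute pointers instead */
	if (!my_has_limits(dev) &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group my_hwmon_group = {
	.attrs		= hwmon_attributes,
	.is_visible	= my_hwmon_visible,
};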
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h index eafd8160a155..f7e367815964 100644 --- a/drivers/gpu/drm/radeon/radeon_trace.h +++ b/drivers/gpu/drm/radeon/radeon_trace.h | |||
@@ -27,6 +27,26 @@ TRACE_EVENT(radeon_bo_create, | |||
27 | TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages) | 27 | TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages) |
28 | ); | 28 | ); |
29 | 29 | ||
30 | TRACE_EVENT(radeon_cs, | ||
31 | TP_PROTO(struct radeon_cs_parser *p), | ||
32 | TP_ARGS(p), | ||
33 | TP_STRUCT__entry( | ||
34 | __field(u32, ring) | ||
35 | __field(u32, dw) | ||
36 | __field(u32, fences) | ||
37 | ), | ||
38 | |||
39 | TP_fast_assign( | ||
40 | __entry->ring = p->ring; | ||
41 | __entry->dw = p->chunks[p->chunk_ib_idx].length_dw; | ||
42 | __entry->fences = radeon_fence_count_emitted( | ||
43 | p->rdev, p->ring); | ||
44 | ), | ||
45 | TP_printk("ring=%u, dw=%u, fences=%u", | ||
46 | __entry->ring, __entry->dw, | ||
47 | __entry->fences) | ||
48 | ); | ||
49 | |||
30 | DECLARE_EVENT_CLASS(radeon_fence_request, | 50 | DECLARE_EVENT_CLASS(radeon_fence_request, |
31 | 51 | ||
32 | TP_PROTO(struct drm_device *dev, u32 seqno), | 52 | TP_PROTO(struct drm_device *dev, u32 seqno), |
@@ -53,13 +73,6 @@ DEFINE_EVENT(radeon_fence_request, radeon_fence_emit, | |||
53 | TP_ARGS(dev, seqno) | 73 | TP_ARGS(dev, seqno) |
54 | ); | 74 | ); |
55 | 75 | ||
56 | DEFINE_EVENT(radeon_fence_request, radeon_fence_retire, | ||
57 | |||
58 | TP_PROTO(struct drm_device *dev, u32 seqno), | ||
59 | |||
60 | TP_ARGS(dev, seqno) | ||
61 | ); | ||
62 | |||
63 | DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin, | 76 | DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin, |
64 | 77 | ||
65 | TP_PROTO(struct drm_device *dev, u32 seqno), | 78 | TP_PROTO(struct drm_device *dev, u32 seqno), |
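radeon_trace.h follows the usual kernel tracepoint layout: the TRACE_EVENT() definitions above only expand into real code in the single translation unit that defines CREATE_TRACE_POINTS before including the header, while every other file — radeon_cs.c in this series — includes it plainly and simply calls trace_radeon_cs(). A sketch of that split; the dedicated radeon_trace_points.c file is how this driver usually arranges it, stated here as an assumption rather than read from the diff:

/* exactly one .c file in the driver instantiates the events */
#define CREATE_TRACE_POINTS
#include "radeon_trace.h"

/* all other users, e.g. radeon_cs.c */
#include "radeon_trace.h"

	trace_radeon_cs(&parser);	/* static-key guarded; near-free until
					 * the event is enabled */

Assuming the header sets TRACE_SYSTEM to radeon, the new event can be switched on at runtime through tracefs under events/radeon/radeon_cs, while the removed radeon_fence_retire event disappears from the same directory.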
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index b8074a8ec75a..9566b5940a5a 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
@@ -274,19 +274,26 @@ static void rs400_mc_init(struct radeon_device *rdev) | |||
274 | 274 | ||
275 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) | 275 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) |
276 | { | 276 | { |
277 | unsigned long flags; | ||
277 | uint32_t r; | 278 | uint32_t r; |
278 | 279 | ||
280 | spin_lock_irqsave(&rdev->mc_idx_lock, flags); | ||
279 | WREG32(RS480_NB_MC_INDEX, reg & 0xff); | 281 | WREG32(RS480_NB_MC_INDEX, reg & 0xff); |
280 | r = RREG32(RS480_NB_MC_DATA); | 282 | r = RREG32(RS480_NB_MC_DATA); |
281 | WREG32(RS480_NB_MC_INDEX, 0xff); | 283 | WREG32(RS480_NB_MC_INDEX, 0xff); |
284 | spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); | ||
282 | return r; | 285 | return r; |
283 | } | 286 | } |
284 | 287 | ||
285 | void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | 288 | void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
286 | { | 289 | { |
290 | unsigned long flags; | ||
291 | |||
292 | spin_lock_irqsave(&rdev->mc_idx_lock, flags); | ||
287 | WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN); | 293 | WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN); |
288 | WREG32(RS480_NB_MC_DATA, (v)); | 294 | WREG32(RS480_NB_MC_DATA, (v)); |
289 | WREG32(RS480_NB_MC_INDEX, 0xff); | 295 | WREG32(RS480_NB_MC_INDEX, 0xff); |
296 | spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); | ||
290 | } | 297 | } |
291 | 298 | ||
292 | #if defined(CONFIG_DEBUG_FS) | 299 | #if defined(CONFIG_DEBUG_FS) |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 670b555d2ca2..6acba8017b9a 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -847,16 +847,26 @@ void rs600_bandwidth_update(struct radeon_device *rdev) | |||
847 | 847 | ||
848 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) | 848 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) |
849 | { | 849 | { |
850 | unsigned long flags; | ||
851 | u32 r; | ||
852 | |||
853 | spin_lock_irqsave(&rdev->mc_idx_lock, flags); | ||
850 | WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | | 854 | WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | |
851 | S_000070_MC_IND_CITF_ARB0(1)); | 855 | S_000070_MC_IND_CITF_ARB0(1)); |
852 | return RREG32(R_000074_MC_IND_DATA); | 856 | r = RREG32(R_000074_MC_IND_DATA); |
857 | spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); | ||
858 | return r; | ||
853 | } | 859 | } |
854 | 860 | ||
855 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | 861 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
856 | { | 862 | { |
863 | unsigned long flags; | ||
864 | |||
865 | spin_lock_irqsave(&rdev->mc_idx_lock, flags); | ||
857 | WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | | 866 | WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | |
858 | S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1)); | 867 | S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1)); |
859 | WREG32(R_000074_MC_IND_DATA, v); | 868 | WREG32(R_000074_MC_IND_DATA, v); |
869 | spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); | ||
860 | } | 870 | } |
861 | 871 | ||
862 | static void rs600_debugfs(struct radeon_device *rdev) | 872 | static void rs600_debugfs(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index d8ddfb34545d..1447d794c22a 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -631,20 +631,27 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
631 | 631 | ||
632 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) | 632 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) |
633 | { | 633 | { |
634 | unsigned long flags; | ||
634 | uint32_t r; | 635 | uint32_t r; |
635 | 636 | ||
637 | spin_lock_irqsave(&rdev->mc_idx_lock, flags); | ||
636 | WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg)); | 638 | WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg)); |
637 | r = RREG32(R_00007C_MC_DATA); | 639 | r = RREG32(R_00007C_MC_DATA); |
638 | WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR); | 640 | WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR); |
641 | spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); | ||
639 | return r; | 642 | return r; |
640 | } | 643 | } |
641 | 644 | ||
642 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | 645 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
643 | { | 646 | { |
647 | unsigned long flags; | ||
648 | |||
649 | spin_lock_irqsave(&rdev->mc_idx_lock, flags); | ||
644 | WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) | | 650 | WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) | |
645 | S_000078_MC_IND_WR_EN(1)); | 651 | S_000078_MC_IND_WR_EN(1)); |
646 | WREG32(R_00007C_MC_DATA, v); | 652 | WREG32(R_00007C_MC_DATA, v); |
647 | WREG32(R_000078_MC_INDEX, 0x7F); | 653 | WREG32(R_000078_MC_INDEX, 0x7F); |
654 | spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); | ||
648 | } | 655 | } |
649 | 656 | ||
650 | static void rs690_mc_program(struct radeon_device *rdev) | 657 | static void rs690_mc_program(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c index d1a1ce73bd45..828a7764660c 100644 --- a/drivers/gpu/drm/radeon/rs780_dpm.c +++ b/drivers/gpu/drm/radeon/rs780_dpm.c | |||
@@ -376,9 +376,8 @@ static void rs780_disable_vbios_powersaving(struct radeon_device *rdev) | |||
376 | WREG32_P(CG_INTGFX_MISC, 0, ~0xFFF00000); | 376 | WREG32_P(CG_INTGFX_MISC, 0, ~0xFFF00000); |
377 | } | 377 | } |
378 | 378 | ||
379 | static void rs780_force_voltage_to_high(struct radeon_device *rdev) | 379 | static void rs780_force_voltage(struct radeon_device *rdev, u16 voltage) |
380 | { | 380 | { |
381 | struct igp_power_info *pi = rs780_get_pi(rdev); | ||
382 | struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps); | 381 | struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps); |
383 | 382 | ||
384 | if ((current_state->max_voltage == RS780_VDDC_LEVEL_HIGH) && | 383 | if ((current_state->max_voltage == RS780_VDDC_LEVEL_HIGH) && |
@@ -390,7 +389,7 @@ static void rs780_force_voltage_to_high(struct radeon_device *rdev) | |||
390 | udelay(1); | 389 | udelay(1); |
391 | 390 | ||
392 | WREG32_P(FVTHROT_PWM_CTRL_REG0, | 391 | WREG32_P(FVTHROT_PWM_CTRL_REG0, |
393 | STARTING_PWM_HIGHTIME(pi->max_voltage), | 392 | STARTING_PWM_HIGHTIME(voltage), |
394 | ~STARTING_PWM_HIGHTIME_MASK); | 393 | ~STARTING_PWM_HIGHTIME_MASK); |
395 | 394 | ||
396 | WREG32_P(FVTHROT_PWM_CTRL_REG0, | 395 | WREG32_P(FVTHROT_PWM_CTRL_REG0, |
@@ -404,6 +403,26 @@ static void rs780_force_voltage_to_high(struct radeon_device *rdev) | |||
404 | WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL); | 403 | WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL); |
405 | } | 404 | } |
406 | 405 | ||
406 | static void rs780_force_fbdiv(struct radeon_device *rdev, u32 fb_div) | ||
407 | { | ||
408 | struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps); | ||
409 | |||
410 | if (current_state->sclk_low == current_state->sclk_high) | ||
411 | return; | ||
412 | |||
413 | WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL); | ||
414 | |||
415 | WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(fb_div), | ||
416 | ~FORCED_FEEDBACK_DIV_MASK); | ||
417 | WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(fb_div), | ||
418 | ~STARTING_FEEDBACK_DIV_MASK); | ||
419 | WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV); | ||
420 | |||
421 | udelay(100); | ||
422 | |||
423 | WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL); | ||
424 | } | ||
425 | |||
407 | static int rs780_set_engine_clock_scaling(struct radeon_device *rdev, | 426 | static int rs780_set_engine_clock_scaling(struct radeon_device *rdev, |
408 | struct radeon_ps *new_ps, | 427 | struct radeon_ps *new_ps, |
409 | struct radeon_ps *old_ps) | 428 | struct radeon_ps *old_ps) |
@@ -432,17 +451,7 @@ static int rs780_set_engine_clock_scaling(struct radeon_device *rdev, | |||
432 | if (ret) | 451 | if (ret) |
433 | return ret; | 452 | return ret; |
434 | 453 | ||
435 | WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL); | 454 | rs780_force_fbdiv(rdev, max_dividers.fb_div); |
436 | |||
437 | WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(max_dividers.fb_div), | ||
438 | ~FORCED_FEEDBACK_DIV_MASK); | ||
439 | WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(max_dividers.fb_div), | ||
440 | ~STARTING_FEEDBACK_DIV_MASK); | ||
441 | WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV); | ||
442 | |||
443 | udelay(100); | ||
444 | |||
445 | WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL); | ||
446 | 455 | ||
447 | if (max_dividers.fb_div > min_dividers.fb_div) { | 456 | if (max_dividers.fb_div > min_dividers.fb_div) { |
448 | WREG32_P(FVTHROT_FBDIV_REG0, | 457 | WREG32_P(FVTHROT_FBDIV_REG0, |
@@ -649,7 +658,7 @@ int rs780_dpm_set_power_state(struct radeon_device *rdev) | |||
649 | rs780_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); | 658 | rs780_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); |
650 | 659 | ||
651 | if (pi->voltage_control) { | 660 | if (pi->voltage_control) { |
652 | rs780_force_voltage_to_high(rdev); | 661 | rs780_force_voltage(rdev, pi->max_voltage); |
653 | mdelay(5); | 662 | mdelay(5); |
654 | } | 663 | } |
655 | 664 | ||
@@ -717,14 +726,18 @@ static void rs780_parse_pplib_non_clock_info(struct radeon_device *rdev, | |||
717 | if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { | 726 | if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { |
718 | rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); | 727 | rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); |
719 | rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); | 728 | rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); |
720 | } else if (r600_is_uvd_state(rps->class, rps->class2)) { | ||
721 | rps->vclk = RS780_DEFAULT_VCLK_FREQ; | ||
722 | rps->dclk = RS780_DEFAULT_DCLK_FREQ; | ||
723 | } else { | 729 | } else { |
724 | rps->vclk = 0; | 730 | rps->vclk = 0; |
725 | rps->dclk = 0; | 731 | rps->dclk = 0; |
726 | } | 732 | } |
727 | 733 | ||
734 | if (r600_is_uvd_state(rps->class, rps->class2)) { | ||
735 | if ((rps->vclk == 0) || (rps->dclk == 0)) { | ||
736 | rps->vclk = RS780_DEFAULT_VCLK_FREQ; | ||
737 | rps->dclk = RS780_DEFAULT_DCLK_FREQ; | ||
738 | } | ||
739 | } | ||
740 | |||
728 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) | 741 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) |
729 | rdev->pm.dpm.boot_ps = rps; | 742 | rdev->pm.dpm.boot_ps = rps; |
730 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) | 743 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) |
@@ -986,3 +999,53 @@ void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rde | |||
986 | seq_printf(m, "power level 1 sclk: %u vddc_index: %d\n", | 999 | seq_printf(m, "power level 1 sclk: %u vddc_index: %d\n", |
987 | ps->sclk_high, ps->max_voltage); | 1000 | ps->sclk_high, ps->max_voltage); |
988 | } | 1001 | } |
1002 | |||
1003 | int rs780_dpm_force_performance_level(struct radeon_device *rdev, | ||
1004 | enum radeon_dpm_forced_level level) | ||
1005 | { | ||
1006 | struct igp_power_info *pi = rs780_get_pi(rdev); | ||
1007 | struct radeon_ps *rps = rdev->pm.dpm.current_ps; | ||
1008 | struct igp_ps *ps = rs780_get_ps(rps); | ||
1009 | struct atom_clock_dividers dividers; | ||
1010 | int ret; | ||
1011 | |||
1012 | rs780_clk_scaling_enable(rdev, false); | ||
1013 | rs780_voltage_scaling_enable(rdev, false); | ||
1014 | |||
1015 | if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { | ||
1016 | if (pi->voltage_control) | ||
1017 | rs780_force_voltage(rdev, pi->max_voltage); | ||
1018 | |||
1019 | ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, | ||
1020 | ps->sclk_high, false, &dividers); | ||
1021 | if (ret) | ||
1022 | return ret; | ||
1023 | |||
1024 | rs780_force_fbdiv(rdev, dividers.fb_div); | ||
1025 | } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) { | ||
1026 | ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, | ||
1027 | ps->sclk_low, false, &dividers); | ||
1028 | if (ret) | ||
1029 | return ret; | ||
1030 | |||
1031 | rs780_force_fbdiv(rdev, dividers.fb_div); | ||
1032 | |||
1033 | if (pi->voltage_control) | ||
1034 | rs780_force_voltage(rdev, pi->min_voltage); | ||
1035 | } else { | ||
1036 | if (pi->voltage_control) | ||
1037 | rs780_force_voltage(rdev, pi->max_voltage); | ||
1038 | |||
1039 | WREG32_P(FVTHROT_FBDIV_REG1, 0, ~FORCE_FEEDBACK_DIV); | ||
1040 | rs780_clk_scaling_enable(rdev, true); | ||
1041 | |||
1042 | if (pi->voltage_control) { | ||
1043 | rs780_voltage_scaling_enable(rdev, true); | ||
1044 | rs780_enable_voltage_scaling(rdev, rps); | ||
1045 | } | ||
1046 | } | ||
1047 | |||
1048 | rdev->pm.dpm.forced_level = level; | ||
1049 | |||
1050 | return 0; | ||
1051 | } | ||
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 8ea1573ae820..873eb4b193b4 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
@@ -209,19 +209,27 @@ static void rv515_mc_init(struct radeon_device *rdev) | |||
209 | 209 | ||
210 | uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) | 210 | uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) |
211 | { | 211 | { |
212 | unsigned long flags; | ||
212 | uint32_t r; | 213 | uint32_t r; |
213 | 214 | ||
215 | spin_lock_irqsave(&rdev->mc_idx_lock, flags); | ||
214 | WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff)); | 216 | WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff)); |
215 | r = RREG32(MC_IND_DATA); | 217 | r = RREG32(MC_IND_DATA); |
216 | WREG32(MC_IND_INDEX, 0); | 218 | WREG32(MC_IND_INDEX, 0); |
219 | spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); | ||
220 | |||
217 | return r; | 221 | return r; |
218 | } | 222 | } |
219 | 223 | ||
220 | void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | 224 | void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
221 | { | 225 | { |
226 | unsigned long flags; | ||
227 | |||
228 | spin_lock_irqsave(&rdev->mc_idx_lock, flags); | ||
222 | WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff)); | 229 | WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff)); |
223 | WREG32(MC_IND_DATA, (v)); | 230 | WREG32(MC_IND_DATA, (v)); |
224 | WREG32(MC_IND_INDEX, 0); | 231 | WREG32(MC_IND_INDEX, 0); |
232 | spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); | ||
225 | } | 233 | } |
226 | 234 | ||
227 | #if defined(CONFIG_DEBUG_FS) | 235 | #if defined(CONFIG_DEBUG_FS) |
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index 8cbb85dae5aa..7282ce7dab76 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c | |||
@@ -2147,14 +2147,18 @@ static void rv7xx_parse_pplib_non_clock_info(struct radeon_device *rdev, | |||
2147 | if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { | 2147 | if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { |
2148 | rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); | 2148 | rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); |
2149 | rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); | 2149 | rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); |
2150 | } else if (r600_is_uvd_state(rps->class, rps->class2)) { | ||
2151 | rps->vclk = RV770_DEFAULT_VCLK_FREQ; | ||
2152 | rps->dclk = RV770_DEFAULT_DCLK_FREQ; | ||
2153 | } else { | 2150 | } else { |
2154 | rps->vclk = 0; | 2151 | rps->vclk = 0; |
2155 | rps->dclk = 0; | 2152 | rps->dclk = 0; |
2156 | } | 2153 | } |
2157 | 2154 | ||
2155 | if (r600_is_uvd_state(rps->class, rps->class2)) { | ||
2156 | if ((rps->vclk == 0) || (rps->dclk == 0)) { | ||
2157 | rps->vclk = RV770_DEFAULT_VCLK_FREQ; | ||
2158 | rps->dclk = RV770_DEFAULT_DCLK_FREQ; | ||
2159 | } | ||
2160 | } | ||
2161 | |||
2158 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) | 2162 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) |
2159 | rdev->pm.dpm.boot_ps = rps; | 2163 | rdev->pm.dpm.boot_ps = rps; |
2160 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) | 2164 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) |
diff --git a/drivers/gpu/drm/radeon/rv770_smc.c b/drivers/gpu/drm/radeon/rv770_smc.c index ab95da570215..b2a224407365 100644 --- a/drivers/gpu/drm/radeon/rv770_smc.c +++ b/drivers/gpu/drm/radeon/rv770_smc.c | |||
@@ -274,8 +274,8 @@ static const u8 cayman_smc_int_vectors[] = | |||
274 | 0x08, 0x72, 0x08, 0x72 | 274 | 0x08, 0x72, 0x08, 0x72 |
275 | }; | 275 | }; |
276 | 276 | ||
277 | int rv770_set_smc_sram_address(struct radeon_device *rdev, | 277 | static int rv770_set_smc_sram_address(struct radeon_device *rdev, |
278 | u16 smc_address, u16 limit) | 278 | u16 smc_address, u16 limit) |
279 | { | 279 | { |
280 | u32 addr; | 280 | u32 addr; |
281 | 281 | ||
@@ -296,9 +296,10 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev, | |||
296 | u16 smc_start_address, const u8 *src, | 296 | u16 smc_start_address, const u8 *src, |
297 | u16 byte_count, u16 limit) | 297 | u16 byte_count, u16 limit) |
298 | { | 298 | { |
299 | unsigned long flags; | ||
299 | u32 data, original_data, extra_shift; | 300 | u32 data, original_data, extra_shift; |
300 | u16 addr; | 301 | u16 addr; |
301 | int ret; | 302 | int ret = 0; |
302 | 303 | ||
303 | if (smc_start_address & 3) | 304 | if (smc_start_address & 3) |
304 | return -EINVAL; | 305 | return -EINVAL; |
@@ -307,13 +308,14 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev, | |||
307 | 308 | ||
308 | addr = smc_start_address; | 309 | addr = smc_start_address; |
309 | 310 | ||
311 | spin_lock_irqsave(&rdev->smc_idx_lock, flags); | ||
310 | while (byte_count >= 4) { | 312 | while (byte_count >= 4) { |
311 | /* SMC address space is BE */ | 313 | /* SMC address space is BE */ |
312 | data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; | 314 | data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; |
313 | 315 | ||
314 | ret = rv770_set_smc_sram_address(rdev, addr, limit); | 316 | ret = rv770_set_smc_sram_address(rdev, addr, limit); |
315 | if (ret) | 317 | if (ret) |
316 | return ret; | 318 | goto done; |
317 | 319 | ||
318 | WREG32(SMC_SRAM_DATA, data); | 320 | WREG32(SMC_SRAM_DATA, data); |
319 | 321 | ||
@@ -328,7 +330,7 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev, | |||
328 | 330 | ||
329 | ret = rv770_set_smc_sram_address(rdev, addr, limit); | 331 | ret = rv770_set_smc_sram_address(rdev, addr, limit); |
330 | if (ret) | 332 | if (ret) |
331 | return ret; | 333 | goto done; |
332 | 334 | ||
333 | original_data = RREG32(SMC_SRAM_DATA); | 335 | original_data = RREG32(SMC_SRAM_DATA); |
334 | 336 | ||
@@ -346,12 +348,15 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev, | |||
346 | 348 | ||
347 | ret = rv770_set_smc_sram_address(rdev, addr, limit); | 349 | ret = rv770_set_smc_sram_address(rdev, addr, limit); |
348 | if (ret) | 350 | if (ret) |
349 | return ret; | 351 | goto done; |
350 | 352 | ||
351 | WREG32(SMC_SRAM_DATA, data); | 353 | WREG32(SMC_SRAM_DATA, data); |
352 | } | 354 | } |
353 | 355 | ||
354 | return 0; | 356 | done: |
357 | spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); | ||
358 | |||
359 | return ret; | ||
355 | } | 360 | } |
356 | 361 | ||
357 | static int rv770_program_interrupt_vectors(struct radeon_device *rdev, | 362 | static int rv770_program_interrupt_vectors(struct radeon_device *rdev, |
@@ -461,12 +466,15 @@ PPSMC_Result rv770_wait_for_smc_inactive(struct radeon_device *rdev) | |||
461 | 466 | ||
462 | static void rv770_clear_smc_sram(struct radeon_device *rdev, u16 limit) | 467 | static void rv770_clear_smc_sram(struct radeon_device *rdev, u16 limit) |
463 | { | 468 | { |
469 | unsigned long flags; | ||
464 | u16 i; | 470 | u16 i; |
465 | 471 | ||
472 | spin_lock_irqsave(&rdev->smc_idx_lock, flags); | ||
466 | for (i = 0; i < limit; i += 4) { | 473 | for (i = 0; i < limit; i += 4) { |
467 | rv770_set_smc_sram_address(rdev, i, limit); | 474 | rv770_set_smc_sram_address(rdev, i, limit); |
468 | WREG32(SMC_SRAM_DATA, 0); | 475 | WREG32(SMC_SRAM_DATA, 0); |
469 | } | 476 | } |
477 | spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); | ||
470 | } | 478 | } |
471 | 479 | ||
472 | int rv770_load_smc_ucode(struct radeon_device *rdev, | 480 | int rv770_load_smc_ucode(struct radeon_device *rdev, |
@@ -595,27 +603,29 @@ int rv770_load_smc_ucode(struct radeon_device *rdev, | |||
595 | int rv770_read_smc_sram_dword(struct radeon_device *rdev, | 603 | int rv770_read_smc_sram_dword(struct radeon_device *rdev, |
596 | u16 smc_address, u32 *value, u16 limit) | 604 | u16 smc_address, u32 *value, u16 limit) |
597 | { | 605 | { |
606 | unsigned long flags; | ||
598 | int ret; | 607 | int ret; |
599 | 608 | ||
609 | spin_lock_irqsave(&rdev->smc_idx_lock, flags); | ||
600 | ret = rv770_set_smc_sram_address(rdev, smc_address, limit); | 610 | ret = rv770_set_smc_sram_address(rdev, smc_address, limit); |
601 | if (ret) | 611 | if (ret == 0) |
602 | return ret; | 612 | *value = RREG32(SMC_SRAM_DATA); |
603 | 613 | spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); | |
604 | *value = RREG32(SMC_SRAM_DATA); | ||
605 | 614 | ||
606 | return 0; | 615 | return ret; |
607 | } | 616 | } |
608 | 617 | ||
609 | int rv770_write_smc_sram_dword(struct radeon_device *rdev, | 618 | int rv770_write_smc_sram_dword(struct radeon_device *rdev, |
610 | u16 smc_address, u32 value, u16 limit) | 619 | u16 smc_address, u32 value, u16 limit) |
611 | { | 620 | { |
621 | unsigned long flags; | ||
612 | int ret; | 622 | int ret; |
613 | 623 | ||
624 | spin_lock_irqsave(&rdev->smc_idx_lock, flags); | ||
614 | ret = rv770_set_smc_sram_address(rdev, smc_address, limit); | 625 | ret = rv770_set_smc_sram_address(rdev, smc_address, limit); |
615 | if (ret) | 626 | if (ret == 0) |
616 | return ret; | 627 | WREG32(SMC_SRAM_DATA, value); |
628 | spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); | ||
617 | 629 | ||
618 | WREG32(SMC_SRAM_DATA, value); | 630 | return ret; |
619 | |||
620 | return 0; | ||
621 | } | 631 | } |
diff --git a/drivers/gpu/drm/radeon/rv770_smc.h b/drivers/gpu/drm/radeon/rv770_smc.h index f78d92a4b325..3b2c963c4880 100644 --- a/drivers/gpu/drm/radeon/rv770_smc.h +++ b/drivers/gpu/drm/radeon/rv770_smc.h | |||
@@ -187,8 +187,6 @@ typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE; | |||
187 | #define RV770_SMC_SOFT_REGISTER_uvd_enabled 0x9C | 187 | #define RV770_SMC_SOFT_REGISTER_uvd_enabled 0x9C |
188 | #define RV770_SMC_SOFT_REGISTER_is_asic_lombok 0xA0 | 188 | #define RV770_SMC_SOFT_REGISTER_is_asic_lombok 0xA0 |
189 | 189 | ||
190 | int rv770_set_smc_sram_address(struct radeon_device *rdev, | ||
191 | u16 smc_address, u16 limit); | ||
192 | int rv770_copy_bytes_to_smc(struct radeon_device *rdev, | 190 | int rv770_copy_bytes_to_smc(struct radeon_device *rdev, |
193 | u16 smc_start_address, const u8 *src, | 191 | u16 smc_start_address, const u8 *src, |
194 | u16 byte_count, u16 limit); | 192 | u16 byte_count, u16 limit); |
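Note: with rv770_set_smc_sram_address() made static and the smc_idx_lock taken inside the SRAM helpers (with a single unlock at the done: label), callers no longer deal with the lock or the address-set step themselves. A hypothetical caller, shown only to illustrate the resulting API:

static int example_read_smc_soft_register(struct radeon_device *rdev,
					  u16 reg_offset, u32 *out, u16 limit)
{
	/* sets the SRAM address and reads the data word in one
	 * critical section under rdev->smc_idx_lock */
	return rv770_read_smc_sram_dword(rdev, reg_offset, out, limit);
}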
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 3e23b757dcfa..c354c1094967 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -83,6 +83,8 @@ extern void si_dma_vm_set_page(struct radeon_device *rdev, | |||
83 | uint64_t pe, | 83 | uint64_t pe, |
84 | uint64_t addr, unsigned count, | 84 | uint64_t addr, unsigned count, |
85 | uint32_t incr, uint32_t flags); | 85 | uint32_t incr, uint32_t flags); |
86 | static void si_enable_gui_idle_interrupt(struct radeon_device *rdev, | ||
87 | bool enable); | ||
86 | 88 | ||
87 | static const u32 verde_rlc_save_restore_register_list[] = | 89 | static const u32 verde_rlc_save_restore_register_list[] = |
88 | { | 90 | { |
@@ -3386,6 +3388,8 @@ static int si_cp_resume(struct radeon_device *rdev) | |||
3386 | u32 rb_bufsz; | 3388 | u32 rb_bufsz; |
3387 | int r; | 3389 | int r; |
3388 | 3390 | ||
3391 | si_enable_gui_idle_interrupt(rdev, false); | ||
3392 | |||
3389 | WREG32(CP_SEM_WAIT_TIMER, 0x0); | 3393 | WREG32(CP_SEM_WAIT_TIMER, 0x0); |
3390 | WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); | 3394 | WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); |
3391 | 3395 | ||
@@ -3501,6 +3505,8 @@ static int si_cp_resume(struct radeon_device *rdev) | |||
3501 | rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; | 3505 | rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; |
3502 | } | 3506 | } |
3503 | 3507 | ||
3508 | si_enable_gui_idle_interrupt(rdev, true); | ||
3509 | |||
3504 | return 0; | 3510 | return 0; |
3505 | } | 3511 | } |
3506 | 3512 | ||
@@ -4888,7 +4894,7 @@ static void si_enable_gfx_cgpg(struct radeon_device *rdev, | |||
4888 | { | 4894 | { |
4889 | u32 tmp; | 4895 | u32 tmp; |
4890 | 4896 | ||
4891 | if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) { | 4897 | if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) { |
4892 | tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10); | 4898 | tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10); |
4893 | WREG32(RLC_TTOP_D, tmp); | 4899 | WREG32(RLC_TTOP_D, tmp); |
4894 | 4900 | ||
@@ -5250,6 +5256,7 @@ void si_update_cg(struct radeon_device *rdev, | |||
5250 | u32 block, bool enable) | 5256 | u32 block, bool enable) |
5251 | { | 5257 | { |
5252 | if (block & RADEON_CG_BLOCK_GFX) { | 5258 | if (block & RADEON_CG_BLOCK_GFX) { |
5259 | si_enable_gui_idle_interrupt(rdev, false); | ||
5253 | /* order matters! */ | 5260 | /* order matters! */ |
5254 | if (enable) { | 5261 | if (enable) { |
5255 | si_enable_mgcg(rdev, true); | 5262 | si_enable_mgcg(rdev, true); |
@@ -5258,6 +5265,7 @@ void si_update_cg(struct radeon_device *rdev, | |||
5258 | si_enable_cgcg(rdev, false); | 5265 | si_enable_cgcg(rdev, false); |
5259 | si_enable_mgcg(rdev, false); | 5266 | si_enable_mgcg(rdev, false); |
5260 | } | 5267 | } |
5268 | si_enable_gui_idle_interrupt(rdev, true); | ||
5261 | } | 5269 | } |
5262 | 5270 | ||
5263 | if (block & RADEON_CG_BLOCK_MC) { | 5271 | if (block & RADEON_CG_BLOCK_MC) { |
@@ -5408,7 +5416,7 @@ static void si_init_pg(struct radeon_device *rdev) | |||
5408 | si_init_dma_pg(rdev); | 5416 | si_init_dma_pg(rdev); |
5409 | } | 5417 | } |
5410 | si_init_ao_cu_mask(rdev); | 5418 | si_init_ao_cu_mask(rdev); |
5411 | if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) { | 5419 | if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) { |
5412 | si_init_gfx_cgpg(rdev); | 5420 | si_init_gfx_cgpg(rdev); |
5413 | } | 5421 | } |
5414 | si_enable_dma_pg(rdev, true); | 5422 | si_enable_dma_pg(rdev, true); |
@@ -5560,7 +5568,9 @@ static void si_disable_interrupt_state(struct radeon_device *rdev) | |||
5560 | { | 5568 | { |
5561 | u32 tmp; | 5569 | u32 tmp; |
5562 | 5570 | ||
5563 | WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); | 5571 | tmp = RREG32(CP_INT_CNTL_RING0) & |
5572 | (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); | ||
5573 | WREG32(CP_INT_CNTL_RING0, tmp); | ||
5564 | WREG32(CP_INT_CNTL_RING1, 0); | 5574 | WREG32(CP_INT_CNTL_RING1, 0); |
5565 | WREG32(CP_INT_CNTL_RING2, 0); | 5575 | WREG32(CP_INT_CNTL_RING2, 0); |
5566 | tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; | 5576 | tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; |
@@ -5685,7 +5695,7 @@ static int si_irq_init(struct radeon_device *rdev) | |||
5685 | 5695 | ||
5686 | int si_irq_set(struct radeon_device *rdev) | 5696 | int si_irq_set(struct radeon_device *rdev) |
5687 | { | 5697 | { |
5688 | u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; | 5698 | u32 cp_int_cntl; |
5689 | u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0; | 5699 | u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0; |
5690 | u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; | 5700 | u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; |
5691 | u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0; | 5701 | u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0; |
@@ -5706,6 +5716,9 @@ int si_irq_set(struct radeon_device *rdev) | |||
5706 | return 0; | 5716 | return 0; |
5707 | } | 5717 | } |
5708 | 5718 | ||
5719 | cp_int_cntl = RREG32(CP_INT_CNTL_RING0) & | ||
5720 | (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); | ||
5721 | |||
5709 | if (!ASIC_IS_NODCE(rdev)) { | 5722 | if (!ASIC_IS_NODCE(rdev)) { |
5710 | hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; | 5723 | hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; |
5711 | hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; | 5724 | hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; |
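Note: both si.c interrupt hunks above replace a hard-coded CNTX_BUSY/CNTX_EMPTY write with a read-mask of the live register, so whatever the clock-/power-gating code has programmed there is preserved. A hypothetical helper capturing that read-mask step (not part of the patch):

static u32 si_preserved_cp_int_bits(struct radeon_device *rdev)
{
	/* carry forward only the context busy/empty enables that are
	 * currently set in CP_INT_CNTL_RING0 */
	return RREG32(CP_INT_CNTL_RING0) &
	       (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
}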
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c index 5f524c0a541e..d422a1cbf727 100644 --- a/drivers/gpu/drm/radeon/si_smc.c +++ b/drivers/gpu/drm/radeon/si_smc.c | |||
@@ -29,8 +29,8 @@ | |||
29 | #include "ppsmc.h" | 29 | #include "ppsmc.h" |
30 | #include "radeon_ucode.h" | 30 | #include "radeon_ucode.h" |
31 | 31 | ||
32 | int si_set_smc_sram_address(struct radeon_device *rdev, | 32 | static int si_set_smc_sram_address(struct radeon_device *rdev, |
33 | u32 smc_address, u32 limit) | 33 | u32 smc_address, u32 limit) |
34 | { | 34 | { |
35 | if (smc_address & 3) | 35 | if (smc_address & 3) |
36 | return -EINVAL; | 36 | return -EINVAL; |
@@ -47,7 +47,8 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev, | |||
47 | u32 smc_start_address, | 47 | u32 smc_start_address, |
48 | const u8 *src, u32 byte_count, u32 limit) | 48 | const u8 *src, u32 byte_count, u32 limit) |
49 | { | 49 | { |
50 | int ret; | 50 | unsigned long flags; |
51 | int ret = 0; | ||
51 | u32 data, original_data, addr, extra_shift; | 52 | u32 data, original_data, addr, extra_shift; |
52 | 53 | ||
53 | if (smc_start_address & 3) | 54 | if (smc_start_address & 3) |
@@ -57,13 +58,14 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev, | |||
57 | 58 | ||
58 | addr = smc_start_address; | 59 | addr = smc_start_address; |
59 | 60 | ||
61 | spin_lock_irqsave(&rdev->smc_idx_lock, flags); | ||
60 | while (byte_count >= 4) { | 62 | while (byte_count >= 4) { |
61 | /* SMC address space is BE */ | 63 | /* SMC address space is BE */ |
62 | data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; | 64 | data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; |
63 | 65 | ||
64 | ret = si_set_smc_sram_address(rdev, addr, limit); | 66 | ret = si_set_smc_sram_address(rdev, addr, limit); |
65 | if (ret) | 67 | if (ret) |
66 | return ret; | 68 | goto done; |
67 | 69 | ||
68 | WREG32(SMC_IND_DATA_0, data); | 70 | WREG32(SMC_IND_DATA_0, data); |
69 | 71 | ||
@@ -78,7 +80,7 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev, | |||
78 | 80 | ||
79 | ret = si_set_smc_sram_address(rdev, addr, limit); | 81 | ret = si_set_smc_sram_address(rdev, addr, limit); |
80 | if (ret) | 82 | if (ret) |
81 | return ret; | 83 | goto done; |
82 | 84 | ||
83 | original_data = RREG32(SMC_IND_DATA_0); | 85 | original_data = RREG32(SMC_IND_DATA_0); |
84 | 86 | ||
@@ -96,11 +98,15 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev, | |||
96 | 98 | ||
97 | ret = si_set_smc_sram_address(rdev, addr, limit); | 99 | ret = si_set_smc_sram_address(rdev, addr, limit); |
98 | if (ret) | 100 | if (ret) |
99 | return ret; | 101 | goto done; |
100 | 102 | ||
101 | WREG32(SMC_IND_DATA_0, data); | 103 | WREG32(SMC_IND_DATA_0, data); |
102 | } | 104 | } |
103 | return 0; | 105 | |
106 | done: | ||
107 | spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); | ||
108 | |||
109 | return ret; | ||
104 | } | 110 | } |
105 | 111 | ||
106 | void si_start_smc(struct radeon_device *rdev) | 112 | void si_start_smc(struct radeon_device *rdev) |
@@ -203,6 +209,7 @@ PPSMC_Result si_wait_for_smc_inactive(struct radeon_device *rdev) | |||
203 | 209 | ||
204 | int si_load_smc_ucode(struct radeon_device *rdev, u32 limit) | 210 | int si_load_smc_ucode(struct radeon_device *rdev, u32 limit) |
205 | { | 211 | { |
212 | unsigned long flags; | ||
206 | u32 ucode_start_address; | 213 | u32 ucode_start_address; |
207 | u32 ucode_size; | 214 | u32 ucode_size; |
208 | const u8 *src; | 215 | const u8 *src; |
@@ -241,6 +248,7 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit) | |||
241 | return -EINVAL; | 248 | return -EINVAL; |
242 | 249 | ||
243 | src = (const u8 *)rdev->smc_fw->data; | 250 | src = (const u8 *)rdev->smc_fw->data; |
251 | spin_lock_irqsave(&rdev->smc_idx_lock, flags); | ||
244 | WREG32(SMC_IND_INDEX_0, ucode_start_address); | 252 | WREG32(SMC_IND_INDEX_0, ucode_start_address); |
245 | WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); | 253 | WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); |
246 | while (ucode_size >= 4) { | 254 | while (ucode_size >= 4) { |
@@ -253,6 +261,7 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit) | |||
253 | ucode_size -= 4; | 261 | ucode_size -= 4; |
254 | } | 262 | } |
255 | WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); | 263 | WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); |
264 | spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); | ||
256 | 265 | ||
257 | return 0; | 266 | return 0; |
258 | } | 267 | } |
@@ -260,25 +269,29 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit) | |||
260 | int si_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, | 269 | int si_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, |
261 | u32 *value, u32 limit) | 270 | u32 *value, u32 limit) |
262 | { | 271 | { |
272 | unsigned long flags; | ||
263 | int ret; | 273 | int ret; |
264 | 274 | ||
275 | spin_lock_irqsave(&rdev->smc_idx_lock, flags); | ||
265 | ret = si_set_smc_sram_address(rdev, smc_address, limit); | 276 | ret = si_set_smc_sram_address(rdev, smc_address, limit); |
266 | if (ret) | 277 | if (ret == 0) |
267 | return ret; | 278 | *value = RREG32(SMC_IND_DATA_0); |
279 | spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); | ||
268 | 280 | ||
269 | *value = RREG32(SMC_IND_DATA_0); | 281 | return ret; |
270 | return 0; | ||
271 | } | 282 | } |
272 | 283 | ||
273 | int si_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, | 284 | int si_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, |
274 | u32 value, u32 limit) | 285 | u32 value, u32 limit) |
275 | { | 286 | { |
287 | unsigned long flags; | ||
276 | int ret; | 288 | int ret; |
277 | 289 | ||
290 | spin_lock_irqsave(&rdev->smc_idx_lock, flags); | ||
278 | ret = si_set_smc_sram_address(rdev, smc_address, limit); | 291 | ret = si_set_smc_sram_address(rdev, smc_address, limit); |
279 | if (ret) | 292 | if (ret == 0) |
280 | return ret; | 293 | WREG32(SMC_IND_DATA_0, value); |
294 | spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); | ||
281 | 295 | ||
282 | WREG32(SMC_IND_DATA_0, value); | 296 | return ret; |
283 | return 0; | ||
284 | } | 297 | } |
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c index b07b7b8f1aff..4beb9992294a 100644 --- a/drivers/gpu/drm/radeon/trinity_dpm.c +++ b/drivers/gpu/drm/radeon/trinity_dpm.c | |||
@@ -1068,6 +1068,17 @@ static void trinity_update_requested_ps(struct radeon_device *rdev, | |||
1068 | pi->requested_rps.ps_priv = &pi->requested_ps; | 1068 | pi->requested_rps.ps_priv = &pi->requested_ps; |
1069 | } | 1069 | } |
1070 | 1070 | ||
1071 | void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable) | ||
1072 | { | ||
1073 | struct trinity_power_info *pi = trinity_get_pi(rdev); | ||
1074 | |||
1075 | if (pi->enable_bapm) { | ||
1076 | trinity_acquire_mutex(rdev); | ||
1077 | trinity_dpm_bapm_enable(rdev, enable); | ||
1078 | trinity_release_mutex(rdev); | ||
1079 | } | ||
1080 | } | ||
1081 | |||
1071 | int trinity_dpm_enable(struct radeon_device *rdev) | 1082 | int trinity_dpm_enable(struct radeon_device *rdev) |
1072 | { | 1083 | { |
1073 | struct trinity_power_info *pi = trinity_get_pi(rdev); | 1084 | struct trinity_power_info *pi = trinity_get_pi(rdev); |
@@ -1091,6 +1102,7 @@ int trinity_dpm_enable(struct radeon_device *rdev) | |||
1091 | trinity_program_sclk_dpm(rdev); | 1102 | trinity_program_sclk_dpm(rdev); |
1092 | trinity_start_dpm(rdev); | 1103 | trinity_start_dpm(rdev); |
1093 | trinity_wait_for_dpm_enabled(rdev); | 1104 | trinity_wait_for_dpm_enabled(rdev); |
1105 | trinity_dpm_bapm_enable(rdev, false); | ||
1094 | trinity_release_mutex(rdev); | 1106 | trinity_release_mutex(rdev); |
1095 | 1107 | ||
1096 | if (rdev->irq.installed && | 1108 | if (rdev->irq.installed && |
@@ -1116,6 +1128,7 @@ void trinity_dpm_disable(struct radeon_device *rdev) | |||
1116 | trinity_release_mutex(rdev); | 1128 | trinity_release_mutex(rdev); |
1117 | return; | 1129 | return; |
1118 | } | 1130 | } |
1131 | trinity_dpm_bapm_enable(rdev, false); | ||
1119 | trinity_disable_clock_power_gating(rdev); | 1132 | trinity_disable_clock_power_gating(rdev); |
1120 | sumo_clear_vc(rdev); | 1133 | sumo_clear_vc(rdev); |
1121 | trinity_wait_for_level_0(rdev); | 1134 | trinity_wait_for_level_0(rdev); |
@@ -1212,6 +1225,8 @@ int trinity_dpm_set_power_state(struct radeon_device *rdev) | |||
1212 | 1225 | ||
1213 | trinity_acquire_mutex(rdev); | 1226 | trinity_acquire_mutex(rdev); |
1214 | if (pi->enable_dpm) { | 1227 | if (pi->enable_dpm) { |
1228 | if (pi->enable_bapm) | ||
1229 | trinity_dpm_bapm_enable(rdev, rdev->pm.dpm.ac_power); | ||
1215 | trinity_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); | 1230 | trinity_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); |
1216 | trinity_enable_power_level_0(rdev); | 1231 | trinity_enable_power_level_0(rdev); |
1217 | trinity_force_level_0(rdev); | 1232 | trinity_force_level_0(rdev); |
@@ -1854,6 +1869,7 @@ int trinity_dpm_init(struct radeon_device *rdev) | |||
1854 | for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) | 1869 | for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) |
1855 | pi->at[i] = TRINITY_AT_DFLT; | 1870 | pi->at[i] = TRINITY_AT_DFLT; |
1856 | 1871 | ||
1872 | pi->enable_bapm = true; | ||
1857 | pi->enable_nbps_policy = true; | 1873 | pi->enable_nbps_policy = true; |
1858 | pi->enable_sclk_ds = true; | 1874 | pi->enable_sclk_ds = true; |
1859 | pi->enable_gfx_power_gating = true; | 1875 | pi->enable_gfx_power_gating = true; |
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.h b/drivers/gpu/drm/radeon/trinity_dpm.h index e82df071f8b3..c261657750ca 100644 --- a/drivers/gpu/drm/radeon/trinity_dpm.h +++ b/drivers/gpu/drm/radeon/trinity_dpm.h | |||
@@ -108,6 +108,7 @@ struct trinity_power_info { | |||
108 | bool enable_auto_thermal_throttling; | 108 | bool enable_auto_thermal_throttling; |
109 | bool enable_dpm; | 109 | bool enable_dpm; |
110 | bool enable_sclk_ds; | 110 | bool enable_sclk_ds; |
111 | bool enable_bapm; | ||
111 | bool uvd_dpm; | 112 | bool uvd_dpm; |
112 | struct radeon_ps current_rps; | 113 | struct radeon_ps current_rps; |
113 | struct trinity_ps current_ps; | 114 | struct trinity_ps current_ps; |
@@ -118,6 +119,7 @@ struct trinity_power_info { | |||
118 | #define TRINITY_AT_DFLT 30 | 119 | #define TRINITY_AT_DFLT 30 |
119 | 120 | ||
120 | /* trinity_smc.c */ | 121 | /* trinity_smc.c */ |
122 | int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable); | ||
121 | int trinity_dpm_config(struct radeon_device *rdev, bool enable); | 123 | int trinity_dpm_config(struct radeon_device *rdev, bool enable); |
122 | int trinity_uvd_dpm_config(struct radeon_device *rdev); | 124 | int trinity_uvd_dpm_config(struct radeon_device *rdev); |
123 | int trinity_dpm_force_state(struct radeon_device *rdev, u32 n); | 125 | int trinity_dpm_force_state(struct radeon_device *rdev, u32 n); |
diff --git a/drivers/gpu/drm/radeon/trinity_smc.c b/drivers/gpu/drm/radeon/trinity_smc.c index a42d89f1830c..9672bcbc7312 100644 --- a/drivers/gpu/drm/radeon/trinity_smc.c +++ b/drivers/gpu/drm/radeon/trinity_smc.c | |||
@@ -56,6 +56,14 @@ static int trinity_notify_message_to_smu(struct radeon_device *rdev, u32 id) | |||
56 | return 0; | 56 | return 0; |
57 | } | 57 | } |
58 | 58 | ||
59 | int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable) | ||
60 | { | ||
61 | if (enable) | ||
62 | return trinity_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM); | ||
63 | else | ||
64 | return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM); | ||
65 | } | ||
66 | |||
59 | int trinity_dpm_config(struct radeon_device *rdev, bool enable) | 67 | int trinity_dpm_config(struct radeon_device *rdev, bool enable) |
60 | { | 68 | { |
61 | if (enable) | 69 | if (enable) |
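Note: the new trinity_dpm_enable_bapm() presumably serves as a power-source hook, mirroring how trinity_dpm_set_power_state() above passes rdev->pm.dpm.ac_power to trinity_dpm_bapm_enable(). A hypothetical caller, for illustration only:

static void example_on_power_source_change(struct radeon_device *rdev)
{
	/* enable BAPM on AC power, disable it on battery;
	 * ac_power is tracked elsewhere by the dpm core */
	trinity_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
}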
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index fd54a14a7c2a..3d79e513c0b3 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h | |||
@@ -12,11 +12,14 @@ | |||
12 | {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 12 | {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
13 | {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 13 | {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
14 | {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 14 | {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
15 | {0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
15 | {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 16 | {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
16 | {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 17 | {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
17 | {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 18 | {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
19 | {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
18 | {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 20 | {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
19 | {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 21 | {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
22 | {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
20 | {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ | 23 | {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ |
21 | {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 24 | {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
22 | {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 25 | {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |