aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorYintian Tao <yttao@amd.com>2019-04-09 08:33:20 -0400
committerAlex Deucher <alexander.deucher@amd.com>2019-04-10 14:53:27 -0400
commitbb5a2bdf36a8df79e1437b443299b7fe1ea3abfc (patch)
tree74af2ad9ead13464b323b6c22d95453af597dd87
parentb0960c3592a39f1cc6fcab1793158f3850f72c77 (diff)
drm/amdgpu: support dpm level modification under virtualization v3
Under vega10 virtualization, smu ip block will not be added. Therefore, we need to add pp clk query and force dpm level function at amdgpu_virt_ops to support the feature. v2: add get_pp_clk existence check and use kzalloc to allocate buf v3: return -ENOMEM for allocation failure and correct the coding style Signed-off-by: Yintian Tao <yttao@amd.com> Reviewed-by: Alex Deucher <alexander.deucher@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h6
7 files changed, 165 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7cee269ec3e3..3f08be7a913e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2471,6 +2471,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
2471 mutex_init(&adev->virt.vf_errors.lock); 2471 mutex_init(&adev->virt.vf_errors.lock);
2472 hash_init(adev->mn_hash); 2472 hash_init(adev->mn_hash);
2473 mutex_init(&adev->lock_reset); 2473 mutex_init(&adev->lock_reset);
2474 mutex_init(&adev->virt.dpm_mutex);
2474 2475
2475 amdgpu_device_check_arguments(adev); 2476 amdgpu_device_check_arguments(adev);
2476 2477
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 2e376064bad8..b17d0545728e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -696,6 +696,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
696 if (adev->pm.dpm_enabled) { 696 if (adev->pm.dpm_enabled) {
697 dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10; 697 dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
698 dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10; 698 dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
699 } else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
700 adev->virt.ops->get_pp_clk) {
701 dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
702 dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
699 } else { 703 } else {
700 dev_info.max_engine_clock = adev->clock.default_sclk * 10; 704 dev_info.max_engine_clock = adev->clock.default_sclk * 10;
701 dev_info.max_memory_clock = adev->clock.default_mclk * 10; 705 dev_info.max_memory_clock = adev->clock.default_mclk * 10;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 88362019d1dd..4b7a076eea9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -327,6 +327,18 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
327 goto fail; 327 goto fail;
328 } 328 }
329 329
330 if (amdgpu_sriov_vf(adev)) {
331 if (amdgim_is_hwperf(adev) &&
332 adev->virt.ops->force_dpm_level) {
333 mutex_lock(&adev->pm.mutex);
334 adev->virt.ops->force_dpm_level(adev, level);
335 mutex_unlock(&adev->pm.mutex);
336 return count;
337 } else {
338 return -EINVAL;
339 }
340 }
341
330 if (current_level == level) 342 if (current_level == level)
331 return count; 343 return count;
332 344
@@ -790,6 +802,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
790 struct drm_device *ddev = dev_get_drvdata(dev); 802 struct drm_device *ddev = dev_get_drvdata(dev);
791 struct amdgpu_device *adev = ddev->dev_private; 803 struct amdgpu_device *adev = ddev->dev_private;
792 804
805 if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
806 adev->virt.ops->get_pp_clk)
807 return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
808
793 if (is_support_sw_smu(adev)) 809 if (is_support_sw_smu(adev))
794 return smu_print_clk_levels(&adev->smu, PP_SCLK, buf); 810 return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
795 else if (adev->powerplay.pp_funcs->print_clock_levels) 811 else if (adev->powerplay.pp_funcs->print_clock_levels)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 462a04e0f5e6..7e7f9ed89ee1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -375,4 +375,53 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
375 } 375 }
376} 376}
377 377
378static uint32_t parse_clk(char *buf, bool min)
379{
380 char *ptr = buf;
381 uint32_t clk = 0;
382
383 do {
384 ptr = strchr(ptr, ':');
385 if (!ptr)
386 break;
387 ptr+=2;
388 clk = simple_strtoul(ptr, NULL, 10);
389 } while (!min);
390
391 return clk * 100;
392}
393
394uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest)
395{
396 char *buf = NULL;
397 uint32_t clk = 0;
398
399 buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
400 if (!buf)
401 return -ENOMEM;
402
403 adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
404 clk = parse_clk(buf, lowest);
405
406 kfree(buf);
407
408 return clk;
409}
410
411uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
412{
413 char *buf = NULL;
414 uint32_t clk = 0;
415
416 buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
417 if (!buf)
418 return -ENOMEM;
419
420 adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
421 clk = parse_clk(buf, lowest);
422
423 kfree(buf);
424
425 return clk;
426}
378 427
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 722deefc0a7e..584947b7ccf3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -57,6 +57,8 @@ struct amdgpu_virt_ops {
57 int (*reset_gpu)(struct amdgpu_device *adev); 57 int (*reset_gpu)(struct amdgpu_device *adev);
58 int (*wait_reset)(struct amdgpu_device *adev); 58 int (*wait_reset)(struct amdgpu_device *adev);
59 void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3); 59 void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
60 int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
61 int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
60}; 62};
61 63
62/* 64/*
@@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG {
83 AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2, 85 AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
84 /* VRAM LOST by GIM */ 86 /* VRAM LOST by GIM */
85 AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4, 87 AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
88 /* HW PERF SIM in GIM */
89 AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
86}; 90};
87 91
88struct amd_sriov_msg_pf2vf_info_header { 92struct amd_sriov_msg_pf2vf_info_header {
@@ -252,6 +256,8 @@ struct amdgpu_virt {
252 struct amdgpu_vf_error_buffer vf_errors; 256 struct amdgpu_vf_error_buffer vf_errors;
253 struct amdgpu_virt_fw_reserve fw_reserve; 257 struct amdgpu_virt_fw_reserve fw_reserve;
254 uint32_t gim_feature; 258 uint32_t gim_feature;
259 /* protect DPM events to GIM */
260 struct mutex dpm_mutex;
255}; 261};
256 262
257#define amdgpu_sriov_enabled(adev) \ 263#define amdgpu_sriov_enabled(adev) \
@@ -278,6 +284,9 @@ static inline bool is_virtual_machine(void)
278#endif 284#endif
279} 285}
280 286
287#define amdgim_is_hwperf(adev) \
288 ((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION)
289
281bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); 290bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
282void amdgpu_virt_init_setting(struct amdgpu_device *adev); 291void amdgpu_virt_init_setting(struct amdgpu_device *adev);
283uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); 292uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
@@ -295,5 +304,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
295 unsigned int key, 304 unsigned int key,
296 unsigned int chksum); 305 unsigned int chksum);
297void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev); 306void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
307uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
308uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
298 309
299#endif 310#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 73851ebb3833..8dbad496b29f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -157,6 +157,82 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
157 xgpu_ai_mailbox_set_valid(adev, false); 157 xgpu_ai_mailbox_set_valid(adev, false);
158} 158}
159 159
160static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf)
161{
162 int r = 0;
163 u32 req, val, size;
164
165 if (!amdgim_is_hwperf(adev) || buf == NULL)
166 return -EBADRQC;
167
168 switch(type) {
169 case PP_SCLK:
170 req = IDH_IRQ_GET_PP_SCLK;
171 break;
172 case PP_MCLK:
173 req = IDH_IRQ_GET_PP_MCLK;
174 break;
175 default:
176 return -EBADRQC;
177 }
178
179 mutex_lock(&adev->virt.dpm_mutex);
180
181 xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
182
183 r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
184 if (!r && adev->fw_vram_usage.va != NULL) {
185 val = RREG32_NO_KIQ(
186 SOC15_REG_OFFSET(NBIO, 0,
187 mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
188 size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
189 val), PAGE_SIZE);
190
191 if (size < PAGE_SIZE)
192 strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val));
193 else
194 size = 0;
195
196 r = size;
197 goto out;
198 }
199
200 r = xgpu_ai_poll_msg(adev, IDH_FAIL);
201 if(r)
202 pr_info("%s DPM request failed",
203 (type == PP_SCLK)? "SCLK" : "MCLK");
204
205out:
206 mutex_unlock(&adev->virt.dpm_mutex);
207 return r;
208}
209
210static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level)
211{
212 int r = 0;
213 u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
214
215 if (!amdgim_is_hwperf(adev))
216 return -EBADRQC;
217
218 mutex_lock(&adev->virt.dpm_mutex);
219 xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
220
221 r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
222 if (!r)
223 goto out;
224
225 r = xgpu_ai_poll_msg(adev, IDH_FAIL);
226 if (!r)
227 pr_info("DPM request failed");
228 else
229 pr_info("Mailbox is broken");
230
231out:
232 mutex_unlock(&adev->virt.dpm_mutex);
233 return r;
234}
235
160static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, 236static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
161 enum idh_request req) 237 enum idh_request req)
162{ 238{
@@ -375,4 +451,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
375 .reset_gpu = xgpu_ai_request_reset, 451 .reset_gpu = xgpu_ai_request_reset,
376 .wait_reset = NULL, 452 .wait_reset = NULL,
377 .trans_msg = xgpu_ai_mailbox_trans_msg, 453 .trans_msg = xgpu_ai_mailbox_trans_msg,
454 .get_pp_clk = xgpu_ai_get_pp_clk,
455 .force_dpm_level = xgpu_ai_force_dpm_level,
378}; 456};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index b4a9ceea334b..39d151b79153 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -35,6 +35,10 @@ enum idh_request {
35 IDH_REL_GPU_FINI_ACCESS, 35 IDH_REL_GPU_FINI_ACCESS,
36 IDH_REQ_GPU_RESET_ACCESS, 36 IDH_REQ_GPU_RESET_ACCESS,
37 37
38 IDH_IRQ_FORCE_DPM_LEVEL = 10,
39 IDH_IRQ_GET_PP_SCLK,
40 IDH_IRQ_GET_PP_MCLK,
41
38 IDH_LOG_VF_ERROR = 200, 42 IDH_LOG_VF_ERROR = 200,
39}; 43};
40 44
@@ -43,6 +47,8 @@ enum idh_event {
43 IDH_READY_TO_ACCESS_GPU, 47 IDH_READY_TO_ACCESS_GPU,
44 IDH_FLR_NOTIFICATION, 48 IDH_FLR_NOTIFICATION,
45 IDH_FLR_NOTIFICATION_CMPL, 49 IDH_FLR_NOTIFICATION_CMPL,
50 IDH_SUCCESS,
51 IDH_FAIL,
46 IDH_EVENT_MAX 52 IDH_EVENT_MAX
47}; 53};
48 54