author     Deepak Nibade <dnibade@nvidia.com>  2016-12-27 05:01:00 -0500
committer  Deepak Nibade <dnibade@nvidia.com>  2016-12-27 05:35:06 -0500
commit     7a81883a0d70c3a43ad2841ac235f6dc344c60fb (patch)
tree       92923d2efccf90d1961071fa9acde59178a0d688 /drivers/gpu/nvgpu/lpwr
parent     505b442551a2e27aa3bc9e608c5a2bc9fccecbc4 (diff)
parent     2aa3c85f8e82b3c07c39e677663abd3687c1822a (diff)
Merge remote-tracking branch 'remotes/origin/dev/merge-nvgpu-t18x-into-nvgpu' into dev-kernel
Merge T186 - gp10b/gp106 code into common nvgpu repo

Bug 200266498

Change-Id: Ibf100ee38010cbed85c149b69b99147256f9a005
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/lpwr')
-rw-r--r--  drivers/gpu/nvgpu/lpwr/lpwr.c  423
-rw-r--r--  drivers/gpu/nvgpu/lpwr/lpwr.h   92
-rw-r--r--  drivers/gpu/nvgpu/lpwr/rppg.c  158
-rw-r--r--  drivers/gpu/nvgpu/lpwr/rppg.h   17
4 files changed, 690 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/lpwr/lpwr.c b/drivers/gpu/nvgpu/lpwr/lpwr.c
new file mode 100644
index 00000000..4f8d2eec
--- /dev/null
+++ b/drivers/gpu/nvgpu/lpwr/lpwr.c
@@ -0,0 +1,423 @@
/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include "gk20a/gk20a.h"
#include "gk20a/pmu_gk20a.h"
#include "gp106/pmu_gp106.h"
#include "gk20a/pmu_api.h"
#include "gm206/bios_gm206.h"
#include "pstate/pstate.h"
#include "include/bios.h"
#include "perf/perf.h"
#include "lpwr.h"

/* Read the LPWR index table from the VBIOS perf tables. */
static int get_lpwr_idx_table(struct gk20a *g)
{
        u32 *lpwr_idx_table_ptr;
        u8 *entry_addr;
        u32 idx;
        struct nvgpu_lpwr_bios_idx_data *pidx_data =
                &g->perf_pmu.lpwr.lwpr_bios_data.idx;
        struct nvgpu_bios_lpwr_idx_table_1x_header header = { 0 };
        struct nvgpu_bios_lpwr_idx_table_1x_entry entry = { 0 };

        if (g->ops.bios.get_perf_table_ptrs) {
                lpwr_idx_table_ptr = (u32 *)g->ops.bios.get_perf_table_ptrs(g,
                                g->bios.perf_token, LOWPOWER_TABLE);
                if (lpwr_idx_table_ptr == NULL)
                        return -EINVAL;
        } else
                return -EINVAL;

        memcpy(&header, lpwr_idx_table_ptr,
                sizeof(struct nvgpu_bios_lpwr_idx_table_1x_header));

        if (header.entry_count >= LPWR_VBIOS_IDX_ENTRY_COUNT_MAX)
                return -EINVAL;

        pidx_data->base_sampling_period = (u16)header.base_sampling_period;

        /* Parse the LPWR Index Table entries. */
        for (idx = 0; idx < header.entry_count; idx++) {
                entry_addr = (u8 *)lpwr_idx_table_ptr + header.header_size +
                        (idx * header.entry_size);

                memcpy(&entry, entry_addr,
                        sizeof(struct nvgpu_bios_lpwr_idx_table_1x_entry));

                pidx_data->entry[idx].pcie_idx = entry.pcie_idx;
                pidx_data->entry[idx].gr_idx = entry.gr_idx;
                pidx_data->entry[idx].ms_idx = entry.ms_idx;
                pidx_data->entry[idx].di_idx = entry.di_idx;
                pidx_data->entry[idx].gc6_idx = entry.gc6_idx;
        }

        return 0;
}

static int get_lpwr_gr_table(struct gk20a *g)
{
        u32 *lpwr_gr_table_ptr;
        u8 *entry_addr;
        u32 idx;
        struct nvgpu_lpwr_bios_gr_data *pgr_data =
                &g->perf_pmu.lpwr.lwpr_bios_data.gr;
        struct nvgpu_bios_lpwr_gr_table_1x_header header = { 0 };
        struct nvgpu_bios_lpwr_gr_table_1x_entry entry = { 0 };

        if (g->ops.bios.get_perf_table_ptrs) {
                lpwr_gr_table_ptr = (u32 *)g->ops.bios.get_perf_table_ptrs(g,
                                g->bios.perf_token, LOWPOWER_GR_TABLE);
                if (lpwr_gr_table_ptr == NULL)
                        return -EINVAL;
        } else
                return -EINVAL;

        memcpy(&header, lpwr_gr_table_ptr,
                sizeof(struct nvgpu_bios_lpwr_gr_table_1x_header));

        /* Guard against overflowing pgr_data->entry[], as the other tables do. */
        if (header.entry_count >= LPWR_VBIOS_GR_ENTRY_COUNT_MAX)
                return -EINVAL;

        /* Parse the LPWR GR Table entries. */
        for (idx = 0; idx < header.entry_count; idx++) {
                entry_addr = (u8 *)lpwr_gr_table_ptr + header.header_size +
                        (idx * header.entry_size);

                memcpy(&entry, entry_addr,
                        sizeof(struct nvgpu_bios_lpwr_gr_table_1x_entry));

                /* "feautre_mask" is the field's spelling in the VBIOS entry struct. */
                if (BIOS_GET_FIELD(entry.feautre_mask,
                                NV_VBIOS_LPWR_MS_FEATURE_MASK_MS)) {
                        pgr_data->entry[idx].gr_enabled = true;

                        pgr_data->entry[idx].feature_mask =
                                NVGPU_PMU_GR_FEATURE_MASK_ALL;

                        if (!BIOS_GET_FIELD(entry.feautre_mask,
                                        NV_VBIOS_LPWR_GR_FEATURE_MASK_GR_RPPG))
                                pgr_data->entry[idx].feature_mask &=
                                        ~NVGPU_PMU_GR_FEATURE_MASK_RPPG;
                }
        }

        return 0;
}

static int get_lpwr_ms_table(struct gk20a *g)
{
        u32 *lpwr_ms_table_ptr;
        u8 *entry_addr;
        u32 idx;
        struct nvgpu_lpwr_bios_ms_data *pms_data =
                &g->perf_pmu.lpwr.lwpr_bios_data.ms;
        struct nvgpu_bios_lpwr_ms_table_1x_header header = { 0 };
        struct nvgpu_bios_lpwr_ms_table_1x_entry entry = { 0 };

        if (g->ops.bios.get_perf_table_ptrs) {
                lpwr_ms_table_ptr = (u32 *)g->ops.bios.get_perf_table_ptrs(g,
                                g->bios.perf_token, LOWPOWER_MS_TABLE);
                if (lpwr_ms_table_ptr == NULL)
                        return -EINVAL;
        } else
                return -EINVAL;

        memcpy(&header, lpwr_ms_table_ptr,
                sizeof(struct nvgpu_bios_lpwr_ms_table_1x_header));

        if (header.entry_count >= LPWR_VBIOS_MS_ENTRY_COUNT_MAX)
                return -EINVAL;

        pms_data->default_entry_idx = (u8)header.default_entry_idx;

        /* The VBIOS stores the idle threshold in 10 us units. */
        pms_data->idle_threshold_us = (u32)(header.idle_threshold_us * 10);

        /* Parse the LPWR MS Table entries. */
        for (idx = 0; idx < header.entry_count; idx++) {
                entry_addr = (u8 *)lpwr_ms_table_ptr + header.header_size +
                        (idx * header.entry_size);

                memcpy(&entry, entry_addr,
                        sizeof(struct nvgpu_bios_lpwr_ms_table_1x_entry));

                if (BIOS_GET_FIELD(entry.feautre_mask,
                                NV_VBIOS_LPWR_MS_FEATURE_MASK_MS)) {
                        pms_data->entry[idx].ms_enabled = true;

                        pms_data->entry[idx].feature_mask =
                                NVGPU_PMU_MS_FEATURE_MASK_ALL;

                        if (!BIOS_GET_FIELD(entry.feautre_mask,
                                        NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_CLOCK_GATING))
                                pms_data->entry[idx].feature_mask &=
                                        ~NVGPU_PMU_MS_FEATURE_MASK_CLOCK_GATING;

                        if (!BIOS_GET_FIELD(entry.feautre_mask,
                                        NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_SWASR))
                                pms_data->entry[idx].feature_mask &=
                                        ~NVGPU_PMU_MS_FEATURE_MASK_SW_ASR;

                        if (!BIOS_GET_FIELD(entry.feautre_mask,
                                        NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_RPPG))
                                pms_data->entry[idx].feature_mask &=
                                        ~NVGPU_PMU_MS_FEATURE_MASK_RPPG;
                }

                pms_data->entry[idx].dynamic_current_logic =
                        entry.dynamic_current_logic;

                pms_data->entry[idx].dynamic_current_sram =
                        entry.dynamic_current_sram;
        }

        return 0;
}

/* Load the GR, MS and index LPWR tables from the VBIOS. */
u32 nvgpu_lpwr_pg_setup(struct gk20a *g)
{
        u32 err = 0;

        gk20a_dbg_fn("");

        err = get_lpwr_gr_table(g);
        if (err)
                return err;

        err = get_lpwr_ms_table(g);
        if (err)
                return err;

        err = get_lpwr_idx_table(g);

        return err;
}

static void nvgpu_pmu_handle_param_lpwr_msg(struct gk20a *g,
                struct pmu_msg *msg, void *param,
                u32 handle, u32 status)
{
        u32 *ack_status = param;

        gk20a_dbg_fn("");

        if (status != 0) {
                gk20a_err(dev_from_gk20a(g), "LPWR PARAM cmd aborted");
                return;
        }

        *ack_status = 1;

        gp106_dbg_pmu("lpwr-param is acknowledged from PMU %x",
                msg->msg.pg.msg_type);
}

int nvgpu_lwpr_mclk_change(struct gk20a *g, u32 pstate)
{
        struct pmu_cmd cmd;
        u32 seq, status = 0;
        u32 payload = NV_PMU_PG_PARAM_MCLK_CHANGE_MS_SWASR_ENABLED;
        struct clk_set_info *pstate_info;
        u32 ack_status = 0;

        gk20a_dbg_fn("");

        pstate_info = pstate_get_clk_set_info(g, pstate,
                        clkwhich_mclk);
        if (!pstate_info)
                return -EINVAL;

        /* WR training is required above the max SW-ASR MCLK frequency. */
        if (pstate_info->max_mhz >
                        MAX_SWASR_MCLK_FREQ_WITHOUT_WR_TRAINING_MAXWELL_MHZ)
                payload |=
                        NV_PMU_PG_PARAM_MCLK_CHANGE_GDDR5_WR_TRAINING_ENABLED;

        /* Only notify the PMU when the payload actually changes. */
        if (payload != g->perf_pmu.lpwr.mclk_change_cache) {
                g->perf_pmu.lpwr.mclk_change_cache = payload;

                memset(&cmd, 0, sizeof(struct pmu_cmd));
                cmd.hdr.unit_id = PMU_UNIT_PG;
                cmd.hdr.size = PMU_CMD_HDR_SIZE +
                        sizeof(struct pmu_pg_cmd_mclk_change);
                cmd.cmd.pg.mclk_change.cmd_type =
                        PMU_PG_CMD_ID_PG_PARAM;
                cmd.cmd.pg.mclk_change.cmd_id =
                        PMU_PG_PARAM_CMD_MCLK_CHANGE;
                cmd.cmd.pg.mclk_change.data = payload;

                gp106_dbg_pmu("cmd post MS PMU_PG_PARAM_CMD_MCLK_CHANGE");
                status = gk20a_pmu_cmd_post(g, &cmd, NULL, NULL,
                        PMU_COMMAND_QUEUE_HPQ,
                        nvgpu_pmu_handle_param_lpwr_msg, &ack_status, &seq, ~0);

                pmu_wait_message_cond(&g->pmu, gk20a_get_gr_idle_timeout(g),
                        &ack_status, 1);
                if (ack_status == 0) {
                        status = -EINVAL;
                        gk20a_err(dev_from_gk20a(g), "MCLK-CHANGE ACK failed");
                }
        }

        return status;
}

u32 nvgpu_lpwr_post_init(struct gk20a *g)
{
        struct pmu_cmd cmd;
        u32 seq;
        u32 status = 0;
        u32 ack_status = 0;

        memset(&cmd, 0, sizeof(struct pmu_cmd));
        cmd.hdr.unit_id = PMU_UNIT_PG;
        cmd.hdr.size = PMU_CMD_HDR_SIZE +
                sizeof(struct pmu_pg_cmd_post_init_param);

        cmd.cmd.pg.post_init.cmd_type =
                PMU_PG_CMD_ID_PG_PARAM;
        cmd.cmd.pg.post_init.cmd_id =
                PMU_PG_PARAM_CMD_POST_INIT;

        gp106_dbg_pmu("cmd post post-init PMU_PG_PARAM_CMD_POST_INIT");
        status = gk20a_pmu_cmd_post(g, &cmd, NULL, NULL,
                PMU_COMMAND_QUEUE_LPQ,
                nvgpu_pmu_handle_param_lpwr_msg, &ack_status, &seq, ~0);

        pmu_wait_message_cond(&g->pmu, gk20a_get_gr_idle_timeout(g),
                &ack_status, 1);
        if (ack_status == 0) {
                status = -EINVAL;
                gk20a_err(dev_from_gk20a(g), "post-init ack failed");
        }

        return status;
}

u32 nvgpu_lpwr_is_mscg_supported(struct gk20a *g, u32 pstate_num)
{
        struct nvgpu_lpwr_bios_ms_data *pms_data =
                &g->perf_pmu.lpwr.lwpr_bios_data.ms;
        struct nvgpu_lpwr_bios_idx_data *pidx_data =
                &g->perf_pmu.lpwr.lwpr_bios_data.idx;
        struct pstate *pstate = pstate_find(g, pstate_num);
        u32 ms_idx;

        gk20a_dbg_fn("");

        if (!pstate)
                return 0;

        ms_idx = pidx_data->entry[pstate->lpwr_entry_idx].ms_idx;
        if (pms_data->entry[ms_idx].ms_enabled)
                return 1;
        else
                return 0;
}

u32 nvgpu_lpwr_is_rppg_supported(struct gk20a *g, u32 pstate_num)
{
        struct nvgpu_lpwr_bios_gr_data *pgr_data =
                &g->perf_pmu.lpwr.lwpr_bios_data.gr;
        struct nvgpu_lpwr_bios_idx_data *pidx_data =
                &g->perf_pmu.lpwr.lwpr_bios_data.idx;
        struct pstate *pstate = pstate_find(g, pstate_num);
        u32 idx;

        gk20a_dbg_fn("");

        if (!pstate)
                return 0;

        idx = pidx_data->entry[pstate->lpwr_entry_idx].gr_idx;
        if (pgr_data->entry[idx].gr_enabled)
                return 1;
        else
                return 0;
}

int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock)
{
        struct pmu_gk20a *pmu = &g->pmu;
        int status = 0;
        u32 is_mscg_supported = 0;
        u32 is_rppg_supported = 0;
        u32 present_pstate = 0;

        gk20a_dbg_fn("");

        if (pstate_lock)
                nvgpu_clk_arb_pstate_change_lock(g, true);
        mutex_lock(&pmu->pg_mutex);

        present_pstate = nvgpu_clk_arb_get_current_pstate(g);

        is_mscg_supported = nvgpu_lpwr_is_mscg_supported(g,
                        present_pstate);
        if (is_mscg_supported && g->mscg_enabled) {
                if (!pmu->mscg_stat)
                        pmu->mscg_stat = PMU_MSCG_ENABLED;
        }

        is_rppg_supported = nvgpu_lpwr_is_rppg_supported(g,
                        present_pstate);
        if (is_rppg_supported) {
                if (support_gk20a_pmu(g->dev) && g->elpg_enabled)
                        status = gk20a_pmu_enable_elpg(g);
        }

        mutex_unlock(&pmu->pg_mutex);
        if (pstate_lock)
                nvgpu_clk_arb_pstate_change_lock(g, false);

        return status;
}

int nvgpu_lpwr_disable_pg(struct gk20a *g, bool pstate_lock)
{
        struct pmu_gk20a *pmu = &g->pmu;
        int status = 0;
        u32 is_mscg_supported = 0;
        u32 is_rppg_supported = 0;
        u32 present_pstate = 0;

        gk20a_dbg_fn("");

        if (pstate_lock)
                nvgpu_clk_arb_pstate_change_lock(g, true);
        mutex_lock(&pmu->pg_mutex);

        present_pstate = nvgpu_clk_arb_get_current_pstate(g);

        is_rppg_supported = nvgpu_lpwr_is_rppg_supported(g,
                        present_pstate);
        if (is_rppg_supported) {
                if (support_gk20a_pmu(g->dev) && g->elpg_enabled) {
                        status = gk20a_pmu_disable_elpg(g);
                        if (status)
                                goto exit_unlock;
                }
        }

        is_mscg_supported = nvgpu_lpwr_is_mscg_supported(g,
                        present_pstate);
        if (is_mscg_supported && g->mscg_enabled) {
                if (pmu->mscg_stat)
                        pmu->mscg_stat = PMU_MSCG_DISABLED;
        }

exit_unlock:
        mutex_unlock(&pmu->pg_mutex);
        if (pstate_lock)
                nvgpu_clk_arb_pstate_change_lock(g, false);

        gk20a_dbg_fn("done");
        return status;
}
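
Editor's note: the three entry points above are designed to bracket clock work; a caller drops power gating, reprograms MCLK, then restores gating. A minimal sketch of such a caller follows. It is illustrative only and not part of this commit; example_mclk_switch() and target_pstate are hypothetical names.

/*
 * Illustrative sketch (not part of this commit): bracketing an MCLK
 * switch with the PG disable/enable pair exported by this file.
 */
static int example_mclk_switch(struct gk20a *g, u32 target_pstate)
{
        int status;

        /* Drop ELPG/MSCG before touching memory clocks. */
        status = nvgpu_lpwr_disable_pg(g, true);
        if (status)
                return status;

        /* Tell the PMU about the new MCLK regime (SW-ASR, WR training). */
        status = nvgpu_lwpr_mclk_change(g, target_pstate);

        /* Re-enable power gating either way; keep the first error seen. */
        if (nvgpu_lpwr_enable_pg(g, true) != 0 && status == 0)
                status = -EINVAL;

        return status;
}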
diff --git a/drivers/gpu/nvgpu/lpwr/lpwr.h b/drivers/gpu/nvgpu/lpwr/lpwr.h
new file mode 100644
index 00000000..6b3259df
--- /dev/null
+++ b/drivers/gpu/nvgpu/lpwr/lpwr.h
@@ -0,0 +1,92 @@
/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#ifndef _LPWR_H_
#define _LPWR_H_

#define MAX_SWASR_MCLK_FREQ_WITHOUT_WR_TRAINING_MAXWELL_MHZ 540

#define NV_PMU_PG_PARAM_MCLK_CHANGE_MS_SWASR_ENABLED BIT(0x1)
#define NV_PMU_PG_PARAM_MCLK_CHANGE_GDDR5_WR_TRAINING_ENABLED BIT(0x3)

#define LPWR_ENTRY_COUNT_MAX 0x06

#define LPWR_VBIOS_IDX_ENTRY_COUNT_MAX (LPWR_ENTRY_COUNT_MAX)

#define LPWR_VBIOS_IDX_ENTRY_RSVD \
        (LPWR_VBIOS_IDX_ENTRY_COUNT_MAX - 1)

#define LPWR_VBIOS_BASE_SAMPLING_PERIOD_DEFAULT (500)

struct nvgpu_lpwr_bios_idx_entry {
        u8 pcie_idx;
        u8 gr_idx;
        u8 ms_idx;
        u8 di_idx;
        u8 gc6_idx;
};

struct nvgpu_lpwr_bios_idx_data {
        u16 base_sampling_period;
        struct nvgpu_lpwr_bios_idx_entry entry[LPWR_VBIOS_IDX_ENTRY_COUNT_MAX];
};

#define LPWR_VBIOS_MS_ENTRY_COUNT_MAX (LPWR_ENTRY_COUNT_MAX)

struct nvgpu_lpwr_bios_ms_entry {
        bool ms_enabled;
        u32 feature_mask;
        u32 asr_efficiency_threshold;
        u16 dynamic_current_logic;
        u16 dynamic_current_sram;
};

struct nvgpu_lpwr_bios_ms_data {
        u8 default_entry_idx;
        u32 idle_threshold_us;
        struct nvgpu_lpwr_bios_ms_entry entry[LPWR_VBIOS_MS_ENTRY_COUNT_MAX];
};

#define LPWR_VBIOS_GR_ENTRY_COUNT_MAX (LPWR_ENTRY_COUNT_MAX)

struct nvgpu_lpwr_bios_gr_entry {
        bool gr_enabled;
        u32 feature_mask;
};

struct nvgpu_lpwr_bios_gr_data {
        u8 default_entry_idx;
        u32 idle_threshold_us;
        u8 adaptive_gr_multiplier;
        struct nvgpu_lpwr_bios_gr_entry entry[LPWR_VBIOS_GR_ENTRY_COUNT_MAX];
};

struct nvgpu_lpwr_bios_data {
        struct nvgpu_lpwr_bios_idx_data idx;
        struct nvgpu_lpwr_bios_ms_data ms;
        struct nvgpu_lpwr_bios_gr_data gr;
};

struct obj_lwpr {
        struct nvgpu_lpwr_bios_data lwpr_bios_data;
        u32 mclk_change_cache;
};

u32 nvgpu_lpwr_pg_setup(struct gk20a *g);
int nvgpu_lwpr_mclk_change(struct gk20a *g, u32 pstate);
int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock);
int nvgpu_lpwr_disable_pg(struct gk20a *g, bool pstate_lock);
u32 nvgpu_lpwr_is_mscg_supported(struct gk20a *g, u32 pstate_num);
u32 nvgpu_lpwr_is_rppg_supported(struct gk20a *g, u32 pstate_num);
u32 nvgpu_lpwr_post_init(struct gk20a *g);

#endif
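
Editor's note: the indirection between the three tables is easiest to see in code. A pstate carries an index into the LPWR index table, and that entry's per-feature indices select rows of the MS and GR tables. The sketch below uses only the structures declared above; example_ms_enabled() is a hypothetical helper, and the driver's real lookup is nvgpu_lpwr_is_mscg_supported() in lpwr.c.

/* Hypothetical helper showing the table indirection only. */
static inline bool example_ms_enabled(const struct nvgpu_lpwr_bios_data *bios,
                u8 lpwr_entry_idx)
{
        /* index table entry -> MS table row -> enable bit */
        u8 ms_idx = bios->idx.entry[lpwr_entry_idx].ms_idx;

        return bios->ms.entry[ms_idx].ms_enabled;
}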
diff --git a/drivers/gpu/nvgpu/lpwr/rppg.c b/drivers/gpu/nvgpu/lpwr/rppg.c
new file mode 100644
index 00000000..40e857ee
--- /dev/null
+++ b/drivers/gpu/nvgpu/lpwr/rppg.c
@@ -0,0 +1,158 @@
/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include "gk20a/gk20a.h"
#include "gk20a/pmu_gk20a.h"
#include "gp106/pmu_gp106.h"
#include "gk20a/pmu_api.h"
#include "gm206/bios_gm206.h"
#include "pstate/pstate.h"
#include "include/bios.h"
#include "pmuif/gpmuif_pg_rppg.h"

static void pmu_handle_rppg_init_msg(struct gk20a *g, struct pmu_msg *msg,
        void *param, u32 handle, u32 status)
{
        u8 ctrlId = NV_PMU_RPPG_CTRL_ID_MAX;
        u32 *success = param;

        if (status == 0) {
                switch (msg->msg.pg.rppg_msg.cmn.msg_id) {
                case NV_PMU_RPPG_MSG_ID_INIT_CTRL_ACK:
                        ctrlId = msg->msg.pg.rppg_msg.init_ctrl_ack.ctrl_id;
                        *success = 1;
                        break;
                }
        }

        gp106_dbg_pmu("RPPG is acknowledged from PMU %x",
                msg->msg.pg.msg_type);
}

static u32 rppg_send_cmd(struct gk20a *g, struct nv_pmu_rppg_cmd *prppg_cmd)
{
        struct pmu_cmd cmd;
        u32 seq;
        u32 status = 0;
        u32 success = 0;

        memset(&cmd, 0, sizeof(struct pmu_cmd));
        cmd.hdr.unit_id = PMU_UNIT_PG;
        cmd.hdr.size = PMU_CMD_HDR_SIZE +
                sizeof(struct nv_pmu_rppg_cmd);

        cmd.cmd.pg.rppg_cmd.cmn.cmd_type = PMU_PMU_PG_CMD_ID_RPPG;
        cmd.cmd.pg.rppg_cmd.cmn.cmd_id = prppg_cmd->cmn.cmd_id;

        switch (prppg_cmd->cmn.cmd_id) {
        case NV_PMU_RPPG_CMD_ID_INIT:
                break;
        case NV_PMU_RPPG_CMD_ID_INIT_CTRL:
                cmd.cmd.pg.rppg_cmd.init_ctrl.ctrl_id =
                        prppg_cmd->init_ctrl.ctrl_id;
                cmd.cmd.pg.rppg_cmd.init_ctrl.domain_id =
                        prppg_cmd->init_ctrl.domain_id;
                break;
        case NV_PMU_RPPG_CMD_ID_STATS_RESET:
                cmd.cmd.pg.rppg_cmd.stats_reset.ctrl_id =
                        prppg_cmd->stats_reset.ctrl_id;
                break;
        default:
                gk20a_err(dev_from_gk20a(g), "Invalid RPPG command %d",
                        prppg_cmd->cmn.cmd_id);
                return -1;
        }

        status = gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
                pmu_handle_rppg_init_msg, &success, &seq, ~0);
        if (status) {
                gk20a_err(dev_from_gk20a(g), "Unable to submit parameter command %d",
                        prppg_cmd->cmn.cmd_id);
                goto exit;
        }

        /* Only INIT_CTRL is acked by the PMU, so only wait for that command. */
        if (prppg_cmd->cmn.cmd_id == NV_PMU_RPPG_CMD_ID_INIT_CTRL) {
                pmu_wait_message_cond(&g->pmu, gk20a_get_gr_idle_timeout(g),
                        &success, 1);
                if (success == 0) {
                        status = -EINVAL;
                        gk20a_err(dev_from_gk20a(g), "No ack for the parameter command %x",
                                prppg_cmd->cmn.cmd_id);
                }
        }

exit:
        return status;
}

static u32 rppg_init(struct gk20a *g)
{
        struct nv_pmu_rppg_cmd rppg_cmd;

        rppg_cmd.init.cmd_id = NV_PMU_RPPG_CMD_ID_INIT;

        return rppg_send_cmd(g, &rppg_cmd);
}

static u32 rppg_ctrl_init(struct gk20a *g, u8 ctrl_id)
{
        struct nv_pmu_rppg_cmd rppg_cmd;

        rppg_cmd.init_ctrl.cmd_id = NV_PMU_RPPG_CMD_ID_INIT_CTRL;
        rppg_cmd.init_ctrl.ctrl_id = ctrl_id;

        switch (ctrl_id) {
        case NV_PMU_RPPG_CTRL_ID_GR:
        case NV_PMU_RPPG_CTRL_ID_MS:
                rppg_cmd.init_ctrl.domain_id = NV_PMU_RPPG_DOMAIN_ID_GFX;
                break;
        }

        return rppg_send_cmd(g, &rppg_cmd);
}

u32 init_rppg(struct gk20a *g)
{
        u32 status;

        status = rppg_init(g);
        if (status != 0) {
                gk20a_err(dev_from_gk20a(g),
                        "Failed to initialize RPPG in PMU: 0x%08x", status);
                return status;
        }

        status = rppg_ctrl_init(g, NV_PMU_RPPG_CTRL_ID_GR);
        if (status != 0) {
                gk20a_err(dev_from_gk20a(g),
                        "Failed to initialize RPPG_CTRL: GR in PMU: 0x%08x",
                        status);
                return status;
        }

        status = rppg_ctrl_init(g, NV_PMU_RPPG_CTRL_ID_MS);
        if (status != 0) {
                gk20a_err(dev_from_gk20a(g),
                        "Failed to initialize RPPG_CTRL: MS in PMU: 0x%08x",
                        status);
                return status;
        }

        return status;
}
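
Editor's note: taken together with lpwr.c, one plausible bring-up order (an assumption, not spelled out in this commit) is to initialize RPPG once the PMU is booted and then send the PG post-init parameter command. A sketch, with example_lpwr_bringup() as a hypothetical name:

/* Hypothetical bring-up order; not part of this commit. */
static int example_lpwr_bringup(struct gk20a *g)
{
        /* INIT, then INIT_CTRL for GR and MS (each INIT_CTRL is acked). */
        u32 status = init_rppg(g);

        if (status != 0)
                return -EINVAL;

        /* PMU_PG_PARAM_CMD_POST_INIT completes the PG parameter handshake. */
        return (int)nvgpu_lpwr_post_init(g);
}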
diff --git a/drivers/gpu/nvgpu/lpwr/rppg.h b/drivers/gpu/nvgpu/lpwr/rppg.h
new file mode 100644
index 00000000..8dc8d36c
--- /dev/null
+++ b/drivers/gpu/nvgpu/lpwr/rppg.h
@@ -0,0 +1,17 @@
/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#ifndef _RPPG_H_
#define _RPPG_H_

u32 init_rppg(struct gk20a *g);

#endif