summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gp106/sec2_gp106.c
Commit message (Expand)AuthorAge
* gpu: nvgpu: Cleanup generic MM code in gk20a/mm_gk20a.cAlex Waterman2017-10-24
* gpu: nvgpu: Change license for common files to MITTerje Bergstrom2017-09-26
* gpu: nvgpu: falcon bootstrap supportMahantesh Kumbar2017-07-06
* gpu: nvgpu: falcon copy to IMEM supportMahantesh Kumbar2017-07-05
* gpu: nvgpu: Falcon controller halt interrupt status clearMahantesh Kumbar2017-07-04
* gpu: nvgpu: Falcon controller wait for haltMahantesh Kumbar2017-07-04
* gpu: nvgpu: falcon HAL to support SEC2Mahantesh Kumbar2017-07-03
* gpu: nvgpu: PMU reset reorgMahantesh Kumbar2017-06-29
* gpu: nvgpu: use nvgpu_flcn_copy_to_dmem()Mahantesh Kumbar2017-06-27
* gpu: nvgpu: moved & renamed "struct pmu_gk20a"Mahantesh Kumbar2017-06-05
* gpu: nvgpu: gp106: Use new delay APIsTerje Bergstrom2017-04-13
* gpu: nvgpu: gp106: Use new error macrosTerje Bergstrom2017-04-07
* gpu: nvgpu: Rename gk20a_mem_* functionsAlex Waterman2017-04-06
* gpu: nvgpu: Use nvgpu_timeout for all loopsTerje Bergstrom2017-03-27
* gpu: nvgpu: Do not query SEC2 freq from CCFTerje Bergstrom2017-03-14
* gpu: nvgpu: ACR interface headers reorganizationMahantesh Kumbar2017-02-23
* gpu: nvgpu: use common nvgpu mutex/spinlock APIsDeepak Nibade2017-02-22
* gpu: nvgpu: Remove PMU gm204/gm206 supportMahantesh Kumbar2017-02-15
* gpu: nvgpu: Move gp106 HW headersAlex Waterman2017-01-11
* gpu: nvgpu: select target based on apertureDeepak Nibade2016-12-27
* gpu: nvgpu: ACR boot on SEC2Mahantesh Kumbar2016-12-27
u: Add PMU thermal RPC for WARN_TEMP' href='/cgit/cgit.cgi/nvgpu.git/commit/drivers/gpu/nvgpu/therm/thrmpmu.c?h=gpu-paging&id=3621d35f95d6060d87a31164b7884fc1e896989f'>3621d35f
d06966c4



3621d35f

d06966c4

e808d345
d06966c4





3a1104c3
d06966c4












2e338c77
d06966c4










3621d35f
ed60c25d










3621d35f







d06966c4

















































ed60c25d










d06966c4







3621d35f


















d06966c4
3621d35f



d06966c4






3621d35f
3a1104c3
d06966c4

3621d35f


d06966c4

3a1104c3
d06966c4

3621d35f


d06966c4

3a1104c3
d06966c4



3621d35f


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
  
                                                                     











                                                                            

                                     
                    
                                         












                                                                     
                                             




                                                   
                                                 



                                                









                                                                                
                                    










                                                                                         
                                    








                                                                                          
 



                                                                    

                   

                                                                
                                                         





                                         
                            












                                                                                  
                                                              










                                                     
                        










                                                                             







                                                                                 

















































                                                                                       










                                                                             







                                                                  


















                                                                           
                                                          



                                                       






                                                           
                     
                            

                                                                           


                          

                                                         
                            

                                                                         


                          

                                              
                            



                                                              


                      
/*
 * Copyright (c) 2016-2017, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include "gk20a/gk20a.h"
#include "boardobj/boardobjgrp.h"
#include "boardobj/boardobjgrp_e32.h"
#include "thrmpmu.h"
#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>

/*
 * Context shared between a posted THERM RPC command and its completion
 * callback (therm_pmucmdhandler).
 */
struct therm_pmucmdhandler_params {
	/* RPC buffer submitted with the command; the PMU writes the
	 * b_supported flag back into it on completion. */
	struct nv_pmu_therm_rpc *prpccall;
	/* Set to 1 by the callback on success; pollers wait on this. */
	u32 success;
};

/*
 * Completion callback for THERM RPC commands posted via therm_pmu_cmd_post().
 *
 * Validates that the PMU response is a THERM RPC message and that the PMU
 * reported the RPC as supported; on success, flags the handler params so
 * the waiting poster can proceed.
 *
 * @param g       GPU context.
 * @param msg     PMU response message.
 * @param param   struct therm_pmucmdhandler_params * passed at post time.
 * @param handle  sequence handle (unused).
 * @param status  transport status (unused).
 */
static void therm_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg,
			void *param, u32 handle, u32 status)
{
	struct therm_pmucmdhandler_params *phandlerparams =
		(struct therm_pmucmdhandler_params *)param;

	if (msg->msg.therm.msg_type != NV_PMU_THERM_MSG_ID_RPC) {
		/* Fixed: log the therm msg_type (original logged the pmgr
		 * union member — a copy-paste from the pmgr handler) and
		 * corrected the "unknow" typo. */
		nvgpu_err(g, "unknown msg %x",
			msg->msg.therm.msg_type);
		return;
	}

	if (!phandlerparams->prpccall->b_supported)
		/* Same copy-paste fix: report the therm msg_type. */
		nvgpu_err(g, "RPC msg %x failed",
			msg->msg.therm.msg_type);
	else
		phandlerparams->success = 1;
}

/*
 * Push the therm device and therm channel board-object tables to the PMU.
 *
 * Each non-empty group is initialized on the PMU via its pmuinithandle
 * hook. Stops at the first failure.
 *
 * @param g  GPU context.
 * @return 0 on success, first non-zero handler status on failure.
 */
u32 therm_send_pmgr_tables_to_pmu(struct gk20a *g)
{
	struct boardobjgrp *pgrp;
	u32 err = 0;

	pgrp = &g->therm_pmu.therm_deviceobjs.super.super;
	if (!BOARDOBJGRP_IS_EMPTY(pgrp)) {
		err = pgrp->pmuinithandle(g, pgrp);
		if (err) {
			nvgpu_err(g,
				"therm_send_pmgr_tables_to_pmu - therm_device failed %x",
				err);
			return err;
		}
	}

	pgrp = &g->therm_pmu.therm_channelobjs.super.super;
	if (!BOARDOBJGRP_IS_EMPTY(pgrp)) {
		err = pgrp->pmuinithandle(g, pgrp);
		if (err) {
			nvgpu_err(g,
				"therm_send_pmgr_tables_to_pmu - therm_channel failed %x",
				err);
			return err;
		}
	}

	return err;
}

/*
 * Post a THERM command to the PMU and, when a callback context is
 * supplied, block until the completion callback flags success.
 *
 * @param g         GPU context.
 * @param cmd       command to post.
 * @param msg       optional response buffer (may be NULL).
 * @param payload   optional in/out payload (may be NULL).
 * @param queue_id  PMU queue to post on.
 * @param callback  completion callback (may be NULL).
 * @param cb_param  callback context; when non-NULL it must point at a
 *                  struct therm_pmucmdhandler_params and this function
 *                  waits for its success flag.
 * @param seq_desc  returned sequence descriptor.
 * @param timeout   post timeout.
 * @return 0 on success, post status or -ETIMEDOUT on failure.
 */
static u32 therm_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
		struct pmu_msg *msg, struct pmu_payload *payload,
		u32 queue_id, pmu_callback callback, void* cb_param,
		u32 *seq_desc, unsigned long timeout)
{
	struct therm_pmucmdhandler_params *params;
	u32 err;

	err = nvgpu_pmu_cmd_post(g, cmd, msg, payload, queue_id,
			callback, cb_param, seq_desc, timeout);
	if (err) {
		nvgpu_err(g,
			"unable to post therm cmd for unit %x cmd id %x size %x",
			cmd->hdr.unit_id, cmd->cmd.therm.cmd_type, cmd->hdr.size);
		return err;
	}

	/* Fire-and-forget post: nothing to wait on. */
	if (!cb_param)
		return err;

	params = (struct therm_pmucmdhandler_params *)cb_param;

	/* Poll until the callback marks the RPC complete or we time out. */
	pmu_wait_message_cond(&g->pmu,
			gk20a_get_gr_idle_timeout(g),
			&params->success, 1);

	if (params->success == 0) {
		nvgpu_err(g, "could not process cmd");
		/* NOTE(review): negative errno stored in a u32 return —
		 * matches the file's existing convention. */
		err = -ETIMEDOUT;
	}

	return err;
}

/*
 * Send the WARN temperature threshold (g->curr_warn_temp) to the PMU as
 * a THERMAL_1 event-threshold RPC and wait for completion.
 *
 * @param g  GPU context.
 * @return 0 on success, non-zero/-ETIMEDOUT on failure.
 */
static u32 therm_set_warn_temp_limit(struct gk20a *g)
{
	u32 seqdesc = 0;
	struct pmu_cmd cmd;
	struct pmu_payload payload;
	struct nv_pmu_therm_rpc rpccall;
	struct therm_pmucmdhandler_params handlerparams;

	/* Removed the dead "struct pmu_msg msg" local: it was memset and
	 * its hdr.size filled in, but NULL was always passed as the msg
	 * argument below, so it had no effect. */
	memset(&payload, 0, sizeof(struct pmu_payload));
	memset(&cmd, 0, sizeof(struct pmu_cmd));
	memset(&rpccall, 0, sizeof(struct nv_pmu_therm_rpc));
	memset(&handlerparams, 0, sizeof(struct therm_pmucmdhandler_params));

	rpccall.function = NV_PMU_THERM_RPC_ID_SLCT_EVENT_TEMP_TH_SET;
	rpccall.params.slct_event_temp_th_set.event_id =
		NV_PMU_THERM_EVENT_THERMAL_1;
	rpccall.params.slct_event_temp_th_set.temp_threshold = g->curr_warn_temp;
	rpccall.b_supported = 0;

	cmd.hdr.unit_id = PMU_UNIT_THERM;
	cmd.hdr.size = ((u32)sizeof(struct nv_pmu_therm_cmd_rpc) +
			(u32)sizeof(struct pmu_hdr));
	cmd.cmd.therm.cmd_type = NV_PMU_THERM_CMD_ID_RPC;

	/* The RPC buffer doubles as input and output payload. */
	payload.in.buf = (u8 *)&rpccall;
	payload.in.size = (u32)sizeof(struct nv_pmu_therm_rpc);
	payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	payload.in.offset = NV_PMU_THERM_CMD_RPC_ALLOC_OFFSET;

	payload.out.buf = (u8 *)&rpccall;
	payload.out.size = (u32)sizeof(struct nv_pmu_therm_rpc);
	payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	/* NOTE(review): CLK offset macro used for a THERM message — looks
	 * like a copy-paste; confirm whether a THERM msg offset exists. */
	payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET;

	/* Setup the handler params to communicate back results.*/
	handlerparams.success = 0;
	handlerparams.prpccall = &rpccall;

	return therm_pmu_cmd_post(g, &cmd, NULL, &payload,
				PMU_COMMAND_QUEUE_LPQ,
				therm_pmucmdhandler,
				(void *)&handlerparams,
				&seqdesc, ~0);
}

/*
 * Ask the PMU to enable HW slowdown notifications for thermal events.
 *
 * @param g  GPU context.
 * @return 0 on success, non-zero on failure.
 */
static u32 therm_enable_slct_notification_request(struct gk20a *g)
{
	struct pmu_cmd cmd = { {0} };
	u32 seq_desc = 0;

	cmd.hdr.unit_id = PMU_UNIT_THERM;
	cmd.hdr.size =
		((u32)sizeof(struct nv_pmu_therm_cmd_hw_slowdown_notification) +
		(u32)sizeof(struct pmu_hdr));
	cmd.cmd.therm.cmd_type = NV_PMU_THERM_CMD_ID_HW_SLOWDOWN_NOTIFICATION;
	cmd.cmd.therm.hw_slct_notification.request =
		NV_RM_PMU_THERM_HW_SLOWDOWN_NOTIFICATION_REQUEST_ENABLE;

	/* No payload and no callback: fire-and-forget post. */
	return therm_pmu_cmd_post(g, &cmd, NULL, NULL,
				PMU_COMMAND_QUEUE_LPQ,
				NULL,
				NULL,
				&seq_desc, ~0);
}

/*
 * Configure the PMU SLCT event mask: enable the THERMAL_1 event via an
 * RPC and wait for completion.
 *
 * @param g  GPU context.
 * @return 0 on success, non-zero/-ETIMEDOUT on failure.
 */
static u32 therm_send_slct_configuration_to_pmu(struct gk20a *g)
{
	u32 seqdesc = 0;
	struct pmu_cmd cmd;
	struct pmu_payload payload;
	struct nv_pmu_therm_rpc rpccall;
	struct therm_pmucmdhandler_params handlerparams;

	/* Removed the dead "struct pmu_msg msg" local: it was zeroed and
	 * its hdr.size set, but never passed to therm_pmu_cmd_post (the
	 * msg argument below is NULL). */
	memset(&payload, 0, sizeof(struct pmu_payload));
	memset(&cmd, 0, sizeof(struct pmu_cmd));
	memset(&rpccall, 0, sizeof(struct nv_pmu_therm_rpc));
	memset(&handlerparams, 0, sizeof(struct therm_pmucmdhandler_params));

	rpccall.function = NV_PMU_THERM_RPC_ID_SLCT;
	rpccall.params.slct.mask_enabled =
		(1 << NV_PMU_THERM_EVENT_THERMAL_1);
	rpccall.b_supported = 0;

	cmd.hdr.unit_id = PMU_UNIT_THERM;
	cmd.hdr.size = ((u32)sizeof(struct nv_pmu_therm_cmd_rpc) +
			(u32)sizeof(struct pmu_hdr));
	cmd.cmd.therm.cmd_type = NV_PMU_THERM_CMD_ID_RPC;

	/* The RPC buffer doubles as input and output payload. */
	payload.in.buf = (u8 *)&rpccall;
	payload.in.size = (u32)sizeof(struct nv_pmu_therm_rpc);
	payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	payload.in.offset = NV_PMU_THERM_CMD_RPC_ALLOC_OFFSET;

	payload.out.buf = (u8 *)&rpccall;
	payload.out.size = (u32)sizeof(struct nv_pmu_therm_rpc);
	payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	/* NOTE(review): CLK offset macro used for a THERM message — looks
	 * like a copy-paste; confirm whether a THERM msg offset exists. */
	payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET;

	/* Setup the handler params to communicate back results.*/
	handlerparams.success = 0;
	handlerparams.prpccall = &rpccall;

	return therm_pmu_cmd_post(g, &cmd, NULL, &payload,
				PMU_COMMAND_QUEUE_LPQ,
				therm_pmucmdhandler,
				(void *)&handlerparams,
				&seqdesc, ~0);
}

/*
 * Full thermal-alert bring-up: enable HW slowdown notifications,
 * program the SLCT event mask, then set the WARN temperature limit.
 * Stops at the first failing step.
 *
 * @param g  GPU context.
 * @return 0 on success, first non-zero step status on failure.
 */
u32 therm_configure_therm_alert(struct gk20a *g)
{
	u32 err;

	err = therm_enable_slct_notification_request(g);
	if (err) {
		nvgpu_err(g,
			"therm_enable_slct_notification_request-failed %d",
			err);
		return err;
	}

	err = therm_send_slct_configuration_to_pmu(g);
	if (err) {
		nvgpu_err(g,
			"therm_send_slct_configuration_to_pmu-failed %d",
			err);
		return err;
	}

	err = therm_set_warn_temp_limit(g);
	if (err) {
		nvgpu_err(g,
			"therm_set_warn_temp_limit-failed %d",
			err);
		return err;
	}

	return err;
}