summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
diff options
context:
space:
mode:
authorArto Merilainen <amerilainen@nvidia.com>2014-03-19 03:38:25 -0400
committerDan Willemsen <dwillemsen@nvidia.com>2015-03-18 15:08:53 -0400
commita9785995d5f22aaeb659285f8aeb64d8b56982e0 (patch)
treecc75f75bcf43db316a002a7a240b81f299bf6d7f /drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
parent61efaf843c22b85424036ec98015121c08f5f16c (diff)
gpu: nvgpu: Add NVIDIA GPU Driver
This patch moves the NVIDIA GPU driver to a new location. Bug 1482562 Change-Id: I24293810b9d0f1504fd9be00135e21dad656ccb6 Signed-off-by: Arto Merilainen <amerilainen@nvidia.com> Reviewed-on: http://git-master/r/383722 Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/pmu_gk20a.h')
-rw-r--r--drivers/gpu/nvgpu/gk20a/pmu_gk20a.h1097
1 files changed, 1097 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
new file mode 100644
index 00000000..c1b8ff1f
--- /dev/null
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
@@ -0,0 +1,1097 @@
1/*
2 * drivers/video/tegra/host/gk20a/pmu_gk20a.h
3 *
4 * GK20A PMU (aka. gPMU outside gk20a context)
5 *
6 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20 */
/* NOTE(review): guard name uses a reserved "__"-prefixed identifier; kernel
 * convention would be PMU_GK20A_H — confirm before renaming (must also match
 * the closing #endif comment). */
#ifndef __PMU_GK20A_H__
#define __PMU_GK20A_H__

/* defined by pmu hw spec */
#define GK20A_PMU_VA_START		((128 * 1024) << 10)	/* 128 MiB */
#define GK20A_PMU_VA_SIZE		(512 * 1024 * 1024)	/* 512 MiB VA window */
#define GK20A_PMU_INST_SIZE		(4 * 1024)		/* instance block, 4 KiB */
#define GK20A_PMU_UCODE_SIZE_MAX	(256 * 1024)		/* max ucode image, 256 KiB */
#define GK20A_PMU_SEQ_BUF_SIZE		4096			/* sequence buffer, bytes */
30
/*
 * Bit mask of ZBC table entries 1..i (bit 0 is always cleared).
 *
 * Fix: the original expression shifted a signed ~(0) (== -1); left-shifting
 * a negative value is undefined behavior in C (C11 6.5.7p4).  Using unsigned
 * operands produces the identical mask value with well-defined semantics.
 */
#define ZBC_MASK(i)			(~(~(0U) << ((i)+1)) & 0xfffeU)
32
/* PMU Command/Message Interfaces for Adaptive Power */
/* Macro to get Histogram index */
#define PMU_AP_HISTOGRAM(idx)		(idx)
#define PMU_AP_HISTOGRAM_CONT		(4)

/* Total number of histogram bins */
#define PMU_AP_CFG_HISTOGRAM_BIN_N	(16)

/* Mapping between Idle counters and histograms */
#define PMU_AP_IDLE_MASK_HIST_IDX_0	(2)
#define PMU_AP_IDLE_MASK_HIST_IDX_1	(3)
#define PMU_AP_IDLE_MASK_HIST_IDX_2	(5)
#define PMU_AP_IDLE_MASK_HIST_IDX_3	(6)


/* Mapping between AP_CTRLs and Histograms */
#define PMU_AP_HISTOGRAM_IDX_GRAPHICS	(PMU_AP_HISTOGRAM(1))

/* Mapping between AP_CTRLs and Idle counters */
#define PMU_AP_IDLE_MASK_GRAPHICS	(PMU_AP_IDLE_MASK_HIST_IDX_1)

/* PMU ucode app_version values used to select the v0/v1 interface structs
 * below — presumably matched against pmu_ucode_desc.app_version; confirm. */
#define APP_VERSION_1 17997577
#define APP_VERSION_0 16856675
56
57
/* Payload-allocation slots of the PERFMON START command (index into its
 * allocation array; only the counter allocation exists today). */
enum pmu_perfmon_cmd_start_fields {
	COUNTER_ALLOC
};

/* Adaptive Power Controls (AP_CTRL) */
enum {
	PMU_AP_CTRL_ID_GRAPHICS = 0x0,
	/* PMU_AP_CTRL_ID_MS ,*/
	PMU_AP_CTRL_ID_MAX ,
};

/* AP_CTRL Statistics
 * NOTE(review): layout appears to mirror a PMU firmware structure (see the
 * explicit rsvd padding); do not reorder fields — confirm against ucode. */
struct pmu_ap_ctrl_stat {
	/*
	 * Represents whether AP is active or not
	 * TODO: This is NvBool in RM; is that 1 byte of 4 bytes?
	 */
	u8	b_active;

	/* Idle filter represented by histogram bin index */
	u8	idle_filter_x;
	u8	rsvd[2];

	/* Total predicted power saving cycles. */
	s32	power_saving_h_cycles;

	/* Counts how many times AP gave us -ve power benefits. */
	u32	bad_decision_count;

	/*
	 * Number of times ap structure needs to skip AP iterations
	 * KICK_CTRL from kernel updates this parameter.
	 */
	u32	skip_count;
	u8	bin[PMU_AP_CFG_HISTOGRAM_BIN_N];
};

/* Parameters initialized by INITn APCTRL command */
struct pmu_ap_ctrl_init_params {
	/* Minimum idle filter value in Us */
	u32	min_idle_filter_us;

	/*
	 * Minimum Targeted Saving in Us. AP will update idle thresholds only
	 * if power saving achieved by updating idle thresholds is greater than
	 * Minimum targeted saving.
	 */
	u32	min_target_saving_us;

	/* Minimum targeted residency of power feature in Us */
	u32	power_break_even_us;

	/*
	 * Maximum number of allowed power feature cycles per sample.
	 *
	 * We are allowing at max "pgPerSampleMax" cycles in one iteration of AP
	 * AKA pgPerSampleMax in original algorithm.
	 */
	u32	cycles_per_sample_max;
};
118
/* AP Commands/Message structures */

/*
 * Structure for Generic AP Commands
 * NOTE(review): u8 cmd_type followed by u16 cmd_id implies one byte of
 * implicit padding on most ABIs; layout presumably matches the PMU ucode —
 * confirm before changing.
 */
struct pmu_ap_cmd_common {
	u8	cmd_type;
	u16	cmd_id;
};

/*
 * Structure for INIT AP command
 */
struct pmu_ap_cmd_init {
	u8	cmd_type;
	u16	cmd_id;
	u8	rsvd;
	u32	pg_sampling_period_us;
};

/*
 * Structure for Enable/Disable ApCtrl Commands
 */
struct pmu_ap_cmd_enable_ctrl {
	u8	cmd_type;
	u16	cmd_id;

	u8	ctrl_id;
};

struct pmu_ap_cmd_disable_ctrl {
	u8	cmd_type;
	u16	cmd_id;

	u8	ctrl_id;
};

/*
 * Structure for INIT command
 */
struct pmu_ap_cmd_init_ctrl {
	u8				cmd_type;
	u16				cmd_id;
	u8				ctrl_id;
	struct pmu_ap_ctrl_init_params	params;
};

/* Same wire layout as pmu_ap_cmd_init_ctrl; kept distinct to match the
 * INIT_AND_ENABLE_CTRL command id. */
struct pmu_ap_cmd_init_and_enable_ctrl {
	u8				cmd_type;
	u16				cmd_id;
	u8				ctrl_id;
	struct pmu_ap_ctrl_init_params	params;
};

/*
 * Structure for KICK_CTRL command
 */
struct pmu_ap_cmd_kick_ctrl {
	u8	cmd_type;
	u16	cmd_id;
	u8	ctrl_id;

	u32	skip_count;
};

/*
 * Structure for PARAM command
 */
struct pmu_ap_cmd_param {
	u8	cmd_type;
	u16	cmd_id;
	u8	ctrl_id;

	u32	data;
};

/*
 * Defines for AP commands (values are the cmd_id of the structs above)
 */
enum {
	PMU_AP_CMD_ID_INIT = 0x0 ,
	PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL,
	PMU_AP_CMD_ID_ENABLE_CTRL ,
	PMU_AP_CMD_ID_DISABLE_CTRL ,
	PMU_AP_CMD_ID_KICK_CTRL ,
};

/*
 * AP Command — discriminated by cmd_type / cmn.cmd_id.
 */
union pmu_ap_cmd {
	u8					cmd_type;
	struct pmu_ap_cmd_common		cmn;
	struct pmu_ap_cmd_init			init;
	struct pmu_ap_cmd_init_and_enable_ctrl	init_and_enable_ctrl;
	struct pmu_ap_cmd_enable_ctrl		enable_ctrl;
	struct pmu_ap_cmd_disable_ctrl		disable_ctrl;
	struct pmu_ap_cmd_kick_ctrl		kick_ctrl;
};
218
/*
 * Structure for generic AP Message
 */
struct pmu_ap_msg_common {
	u8	msg_type;
	u16	msg_id;
};

/*
 * Structure for INIT_ACK Message
 */
struct pmu_ap_msg_init_ack {
	u8	msg_type;
	u16	msg_id;
	u8	ctrl_id;
	u32	stats_dmem_offset;	/* DMEM offset of the ctrl's stats block */
};

/*
 * Defines for AP messages
 */
enum {
	PMU_AP_MSG_ID_INIT_ACK = 0x0,
};

/*
 * AP Message — discriminated by msg_type.
 */
union pmu_ap_msg {
	u8				msg_type;
	struct pmu_ap_msg_common	cmn;
	struct pmu_ap_msg_init_ack	init_ack;
};

/* Default Sampling Period of AELPG */
#define APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US	(1000000)	/* 1 s */

/* Default values of APCTRL parameters */
#define APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US	(100)
#define APCTRL_MINIMUM_TARGET_SAVING_DEFAULT_US	(10000)
#define APCTRL_POWER_BREAKEVEN_DEFAULT_US	(2000)
#define APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT	(100)

/*
 * Disable reason for Adaptive Power Controller
 */
enum {
	APCTRL_DISABLE_REASON_RM_UNLOAD,
	APCTRL_DISABLE_REASON_RMCTRL,
};

/*
 * Adaptive Power Controller — per-controller host-side state.
 */
struct ap_ctrl {
	u32			stats_dmem_offset;	/* from INIT_ACK */
	u32			disable_reason_mask;	/* bitmask of APCTRL_DISABLE_REASON_* */
	struct pmu_ap_ctrl_stat	stat_cache;
	u8			b_ready;
};

/*
 * Adaptive Power structure
 *
 * ap structure provides generic infrastructure to make any power feature
 * adaptive.
 */
struct pmu_ap {
	u32		supported_mask;		/* bitmask of supported AP_CTRL ids */
	struct ap_ctrl	ap_ctrl[PMU_AP_CTRL_ID_MAX];
};
290
291
292enum {
293 GK20A_PMU_DMAIDX_UCODE = 0,
294 GK20A_PMU_DMAIDX_VIRT = 1,
295 GK20A_PMU_DMAIDX_PHYS_VID = 2,
296 GK20A_PMU_DMAIDX_PHYS_SYS_COH = 3,
297 GK20A_PMU_DMAIDX_PHYS_SYS_NCOH = 4,
298 GK20A_PMU_DMAIDX_RSVD = 5,
299 GK20A_PMU_DMAIDX_PELPG = 6,
300 GK20A_PMU_DMAIDX_END = 7
301};
302
303struct pmu_mem_v0 {
304 u32 dma_base;
305 u8 dma_offset;
306 u8 dma_idx;
307};
308
309struct pmu_mem_v1 {
310 u32 dma_base;
311 u8 dma_offset;
312 u8 dma_idx;
313 u16 fb_size;
314};
315
316struct pmu_dmem {
317 u16 size;
318 u32 offset;
319};
320
321/* Make sure size of this structure is a multiple of 4 bytes */
322struct pmu_cmdline_args_v0 {
323 u32 cpu_freq_hz; /* Frequency of the clock driving PMU */
324 u32 falc_trace_size; /* falctrace buffer size (bytes) */
325 u32 falc_trace_dma_base; /* 256-byte block address */
326 u32 falc_trace_dma_idx; /* dmaIdx for DMA operations */
327 struct pmu_mem_v0 gc6_ctx; /* dmem offset of gc6 context */
328};
329
330struct pmu_cmdline_args_v1 {
331 u32 cpu_freq_hz; /* Frequency of the clock driving PMU */
332 u32 falc_trace_size; /* falctrace buffer size (bytes) */
333 u32 falc_trace_dma_base; /* 256-byte block address */
334 u32 falc_trace_dma_idx; /* dmaIdx for DMA operations */
335 u8 secure_mode;
336 struct pmu_mem_v1 gc6_ctx; /* dmem offset of gc6 context */
337};
338
/* DMEM block size as a power of two (2^8 = 256 bytes). */
#define GK20A_PMU_DMEM_BLKSIZE2 8

#define GK20A_PMU_UCODE_NB_MAX_OVERLAY	    32
#define GK20A_PMU_UCODE_NB_MAX_DATE_LENGTH  64

/* Descriptor at the head of a PMU ucode image; layout is defined by the
 * ucode build tools — do not reorder fields. */
struct pmu_ucode_desc {
	u32 descriptor_size;
	u32 image_size;
	u32 tools_version;
	u32 app_version;
	char date[GK20A_PMU_UCODE_NB_MAX_DATE_LENGTH];
	u32 bootloader_start_offset;
	u32 bootloader_size;
	u32 bootloader_imem_offset;
	u32 bootloader_entry_point;
	u32 app_start_offset;
	u32 app_size;
	u32 app_imem_offset;
	u32 app_imem_entry;
	u32 app_dmem_offset;
	u32 app_resident_code_offset;	/* Offset from appStartOffset */
	u32 app_resident_code_size;	/* Exact size of the resident code ( potentially contains CRC inside at the end ) */
	u32 app_resident_data_offset;	/* Offset from appStartOffset */
	u32 app_resident_data_size;	/* Exact size of the resident code ( potentially contains CRC inside at the end ) */
	u32 nb_overlays;
	struct {u32 start; u32 size;} load_ovl[GK20A_PMU_UCODE_NB_MAX_OVERLAY];
	u32 compressed;
};
367
/* PMU unit ids: the destination/source unit carried in pmu_hdr.unit_id. */
#define PMU_UNIT_REWIND		(0x00)
#define PMU_UNIT_I2C		(0x01)
#define PMU_UNIT_SEQ		(0x02)
#define PMU_UNIT_PG		(0x03)
#define PMU_UNIT_AVAILABLE1	(0x04)
#define PMU_UNIT_AVAILABLE2	(0x05)
#define PMU_UNIT_MEM		(0x06)
#define PMU_UNIT_INIT		(0x07)
#define PMU_UNIT_FBBA		(0x08)
#define PMU_UNIT_DIDLE		(0x09)
#define PMU_UNIT_AVAILABLE3	(0x0A)
#define PMU_UNIT_AVAILABLE4	(0x0B)
#define PMU_UNIT_HDCP_MAIN	(0x0C)
#define PMU_UNIT_HDCP_V		(0x0D)
#define PMU_UNIT_HDCP_SRM	(0x0E)
#define PMU_UNIT_NVDPS		(0x0F)
#define PMU_UNIT_DEINIT		(0x10)
#define PMU_UNIT_AVAILABLE5	(0x11)
#define PMU_UNIT_PERFMON	(0x12)
#define PMU_UNIT_FAN		(0x13)
#define PMU_UNIT_PBI		(0x14)
#define PMU_UNIT_ISOBLIT	(0x15)
#define PMU_UNIT_DETACH		(0x16)
#define PMU_UNIT_DISP		(0x17)
#define PMU_UNIT_HDCP		(0x18)
#define PMU_UNIT_REGCACHE	(0x19)
#define PMU_UNIT_SYSMON		(0x1A)
#define PMU_UNIT_THERM		(0x1B)
#define PMU_UNIT_PMGR		(0x1C)
#define PMU_UNIT_PERF		(0x1D)
#define PMU_UNIT_PCM		(0x1E)
#define PMU_UNIT_RC		(0x1F)
#define PMU_UNIT_NULL		(0x20)
#define PMU_UNIT_LOGGER		(0x21)
#define PMU_UNIT_SMBPBI		(0x22)
#define PMU_UNIT_END		(0x23)

/* Test unit id range.  END_SIM and TEST_END deliberately share 0xFF. */
#define PMU_UNIT_TEST_START	(0xFE)
#define PMU_UNIT_END_SIM	(0xFF)
#define PMU_UNIT_TEST_END	(0xFF)

/* Valid ids are [0, PMU_UNIT_END) plus the test range [TEST_START, 0xFF]. */
#define PMU_UNIT_ID_IS_VALID(id)		\
		(((id) < PMU_UNIT_END) || ((id) >= PMU_UNIT_TEST_START))
411
#define PMU_DMEM_ALLOC_ALIGNMENT	(32)
#define PMU_DMEM_ALIGNMENT		(4)

/* High nibble of pmu_hdr.ctrl_flags is reserved for PMU-internal use. */
#define PMU_CMD_FLAGS_PMU_MASK		(0xF0)

#define PMU_CMD_FLAGS_STATUS		BIT(0)
#define PMU_CMD_FLAGS_INTR		BIT(1)
#define PMU_CMD_FLAGS_EVENT		BIT(2)
#define PMU_CMD_FLAGS_WATERMARK		BIT(3)

/* Common 4-byte header prepended to every PMU command and message. */
struct pmu_hdr {
	u8 unit_id;	/* destination/source unit, see PMU_UNIT_* */
	u8 size;	/* total size including this header, bytes */
	u8 ctrl_flags;	/* PMU_CMD_FLAGS_* */
	u8 seq_id;	/* matches command to its response sequence */
};
#define PMU_MSG_HDR_SIZE	sizeof(struct pmu_hdr)
#define PMU_CMD_HDR_SIZE	sizeof(struct pmu_hdr)

/* NOTE(review): also (re)defined identically next to the queue-id defines
 * further down; one of the two should eventually be removed. */
#define PMU_QUEUE_COUNT		5
432
/* Payload allocation bookkeeping, v0 ucode interface. */
struct pmu_allocation_v0 {
	u8 pad[3];
	u8 fb_mem_use;
	struct {
		struct pmu_dmem dmem;
		struct pmu_mem_v0 fb;
	} alloc;
};

/* Payload allocation bookkeeping, v1 ucode interface. */
struct pmu_allocation_v1 {
	struct {
		struct pmu_dmem dmem;
		struct pmu_mem_v1 fb;
	} alloc;
};

enum {
	PMU_INIT_MSG_TYPE_PMU_INIT = 0,
};

/* INIT message body, v0: reports queue geometry and the SW-managed DMEM area. */
struct pmu_init_msg_pmu_v0 {
	u8 msg_type;
	u8 pad;

	struct {
		u16 size;
		u16 offset;
		u8  index;
		u8  pad;
	} queue_info[PMU_QUEUE_COUNT];

	u16 sw_managed_area_offset;
	u16 sw_managed_area_size;
};

/* INIT message body, v1: adds os_debug_entry_point. */
struct pmu_init_msg_pmu_v1 {
	u8 msg_type;
	u8 pad;
	u16 os_debug_entry_point;

	struct {
		u16 size;
		u16 offset;
		u8  index;
		u8  pad;
	} queue_info[PMU_QUEUE_COUNT];

	u16 sw_managed_area_offset;
	u16 sw_managed_area_size;
};

union pmu_init_msg_pmu {
	struct pmu_init_msg_pmu_v0 v0;
	struct pmu_init_msg_pmu_v1 v1;
};

struct pmu_init_msg {
	union {
		u8 msg_type;
		struct pmu_init_msg_pmu_v1 pmu_init_v1;
		struct pmu_init_msg_pmu_v0 pmu_init_v0;
	};
};
496
/* ELPG message sub-ids carried in pmu_pg_msg_elpg_msg.msg. */
enum {
	PMU_PG_ELPG_MSG_INIT_ACK,
	PMU_PG_ELPG_MSG_DISALLOW_ACK,
	PMU_PG_ELPG_MSG_ALLOW_ACK,
	PMU_PG_ELPG_MSG_FREEZE_ACK,
	PMU_PG_ELPG_MSG_FREEZE_ABORT,
	PMU_PG_ELPG_MSG_UNFREEZE_ACK,
};

struct pmu_pg_msg_elpg_msg {
	u8 msg_type;
	u8 engine_id;
	u16 msg;	/* PMU_PG_ELPG_MSG_* */
};

enum {
	PMU_PG_STAT_MSG_RESP_DMEM_OFFSET = 0,
};

/* PG statistics message; data is the DMEM offset of the stats block. */
struct pmu_pg_msg_stat {
	u8 msg_type;
	u8 engine_id;
	u16 sub_msg_id;
	u32 data;
};

enum {
	PMU_PG_MSG_ENG_BUF_LOADED,
	PMU_PG_MSG_ENG_BUF_UNLOADED,
	PMU_PG_MSG_ENG_BUF_FAILED,
};

/* Status report for an engine buffer load/unload request. */
struct pmu_pg_msg_eng_buf_stat {
	u8 msg_type;
	u8 engine_id;
	u8 buf_idx;
	u8 status;	/* PMU_PG_MSG_ENG_BUF_* */
};

struct pmu_pg_msg {
	union {
		u8 msg_type;
		struct pmu_pg_msg_elpg_msg elpg_msg;
		struct pmu_pg_msg_stat stat;
		struct pmu_pg_msg_eng_buf_stat eng_buf_stat;
		/* TBD: other pg messages */
		union pmu_ap_msg ap_msg;
	};
};

enum {
	PMU_RC_MSG_TYPE_UNHANDLED_CMD = 0,
};

/* PMU's report that it received a command for a unit it cannot handle. */
struct pmu_rc_msg_unhandled_cmd {
	u8 msg_type;
	u8 unit_id;
};

struct pmu_rc_msg {
	u8 msg_type;
	struct pmu_rc_msg_unhandled_cmd unhandled_cmd;
};

/* PG command ids; note the jump to 0x20 for the PWR_RAIL_GATE group. */
enum {
	PMU_PG_CMD_ID_ELPG_CMD = 0,
	PMU_PG_CMD_ID_ENG_BUF_LOAD,
	PMU_PG_CMD_ID_ENG_BUF_UNLOAD,
	PMU_PG_CMD_ID_PG_STAT,
	PMU_PG_CMD_ID_PG_LOG_INIT,
	PMU_PG_CMD_ID_PG_LOG_FLUSH,
	PMU_PG_CMD_ID_PG_PARAM,
	PMU_PG_CMD_ID_ELPG_INIT,
	PMU_PG_CMD_ID_ELPG_POLL_CTXSAVE,
	PMU_PG_CMD_ID_ELPG_ABORT_POLL,
	PMU_PG_CMD_ID_ELPG_PWR_UP,
	PMU_PG_CMD_ID_ELPG_DISALLOW,
	PMU_PG_CMD_ID_ELPG_ALLOW,
	PMU_PG_CMD_ID_AP,
	RM_PMU_PG_CMD_ID_PSI,
	RM_PMU_PG_CMD_ID_CG,
	PMU_PG_CMD_ID_ZBC_TABLE_UPDATE,
	PMU_PG_CMD_ID_PWR_RAIL_GATE_DISABLE = 0x20,
	PMU_PG_CMD_ID_PWR_RAIL_GATE_ENABLE,
	PMU_PG_CMD_ID_PWR_RAIL_SMU_MSG_DISABLE
};

/* ELPG command sub-ids carried in pmu_pg_cmd_elpg_cmd.cmd. */
enum {
	PMU_PG_ELPG_CMD_INIT,
	PMU_PG_ELPG_CMD_DISALLOW,
	PMU_PG_ELPG_CMD_ALLOW,
	PMU_PG_ELPG_CMD_FREEZE,
	PMU_PG_ELPG_CMD_UNFREEZE,
};

struct pmu_pg_cmd_elpg_cmd {
	u8 cmd_type;
	u8 engine_id;
	u16 cmd;	/* PMU_PG_ELPG_CMD_* */
};

/* Request the PMU to DMA an engine buffer into place. */
struct pmu_pg_cmd_eng_buf_load {
	u8 cmd_type;
	u8 engine_id;
	u8 buf_idx;
	u8 pad;
	u16 buf_size;
	u32 dma_base;
	u8 dma_offset;
	u8 dma_idx;
};

enum {
	PMU_PG_STAT_CMD_ALLOC_DMEM = 0,
};

struct pmu_pg_cmd_stat {
	u8 cmd_type;
	u8 engine_id;
	u16 sub_cmd_id;
	u32 data;
};

struct pmu_pg_cmd {
	union {
		u8 cmd_type;
		struct pmu_pg_cmd_elpg_cmd elpg_cmd;
		struct pmu_pg_cmd_eng_buf_load eng_buf_load;
		struct pmu_pg_cmd_stat stat;
		/* TBD: other pg commands */
		union pmu_ap_cmd ap_cmd;
	};
};
630
/* PERFMON */
#define PMU_DOMAIN_GROUP_PSTATE		0
#define PMU_DOMAIN_GROUP_GPC2CLK	1
#define PMU_DOMAIN_GROUP_NUM		2

/* TBD: smart strategy */
#define PMU_PERFMON_PCT_TO_INC		58
#define PMU_PERFMON_PCT_TO_DEC		23

/* One monitored counter; thresholds are in units of 0.01%. */
struct pmu_perfmon_counter {
	u8 index;
	u8 flags;		/* PMU_PERFMON_FLAG_* */
	u8 group_id;
	u8 valid;
	u16 upper_threshold;	/* units of 0.01% */
	u16 lower_threshold;	/* units of 0.01% */
};

#define PMU_PERFMON_FLAG_ENABLE_INCREASE	(0x00000001)
#define PMU_PERFMON_FLAG_ENABLE_DECREASE	(0x00000002)
#define PMU_PERFMON_FLAG_CLEAR_PREV		(0x00000004)

/* PERFMON CMD */
enum {
	PMU_PERFMON_CMD_ID_START = 0,
	PMU_PERFMON_CMD_ID_STOP  = 1,
	PMU_PERFMON_CMD_ID_INIT  = 2
};

struct pmu_perfmon_cmd_start_v1 {
	u8 cmd_type;
	u8 group_id;
	u8 state_id;
	u8 flags;
	struct pmu_allocation_v1 counter_alloc;
};

struct pmu_perfmon_cmd_start_v0 {
	u8 cmd_type;
	u8 group_id;
	u8 state_id;
	u8 flags;
	struct pmu_allocation_v0 counter_alloc;
};

struct pmu_perfmon_cmd_stop {
	u8 cmd_type;
};

struct pmu_perfmon_cmd_init_v1 {
	u8 cmd_type;
	u8 to_decrease_count;
	u8 base_counter_id;
	u32 sample_period_us;
	struct pmu_allocation_v1 counter_alloc;
	u8 num_counters;
	u8 samples_in_moving_avg;
	u16 sample_buffer;
};

struct pmu_perfmon_cmd_init_v0 {
	u8 cmd_type;
	u8 to_decrease_count;
	u8 base_counter_id;
	u32 sample_period_us;
	struct pmu_allocation_v0 counter_alloc;
	u8 num_counters;
	u8 samples_in_moving_avg;
	u16 sample_buffer;
};

struct pmu_perfmon_cmd {
	union {
		u8 cmd_type;
		struct pmu_perfmon_cmd_start_v0 start_v0;
		struct pmu_perfmon_cmd_start_v1 start_v1;
		struct pmu_perfmon_cmd_stop stop;
		struct pmu_perfmon_cmd_init_v0 init_v0;
		struct pmu_perfmon_cmd_init_v1 init_v1;
	};
};

/* ZBC table save request; entry_mask selects entries (see ZBC_MASK). */
struct pmu_zbc_cmd {
	u8 cmd_type;
	u8 pad;
	u16 entry_mask;
};

/* PERFMON MSG */
enum {
	PMU_PERFMON_MSG_ID_INCREASE_EVENT = 0,
	PMU_PERFMON_MSG_ID_DECREASE_EVENT = 1,
	PMU_PERFMON_MSG_ID_INIT_EVENT     = 2,
	PMU_PERFMON_MSG_ID_ACK            = 3
};

struct pmu_perfmon_msg_generic {
	u8 msg_type;
	u8 state_id;
	u8 group_id;
	u8 data;
};

struct pmu_perfmon_msg {
	union {
		u8 msg_type;
		struct pmu_perfmon_msg_generic gen;
	};
};
740
/* Top-level command: header plus the unit-specific body. */
struct pmu_cmd {
	struct pmu_hdr hdr;
	union {
		struct pmu_perfmon_cmd perfmon;
		struct pmu_pg_cmd pg;
		struct pmu_zbc_cmd zbc;
	} cmd;
};

/* Top-level message: header plus the unit-specific body. */
struct pmu_msg {
	struct pmu_hdr hdr;
	union {
		struct pmu_init_msg init;
		struct pmu_perfmon_msg perfmon;
		struct pmu_pg_msg pg;
		struct pmu_rc_msg rc;
	} msg;
};

/* Magic marking a SHA1 GID blob inside the ucode image. */
#define PMU_SHA1_GID_SIGNATURE		0xA7C66AD2
#define PMU_SHA1_GID_SIGNATURE_SIZE	4

#define PMU_SHA1_GID_SIZE	16

/* Parsed GID (GPU id) info cached by the driver. */
struct pmu_sha1_gid {
	bool valid;
	u8 gid[PMU_SHA1_GID_SIZE];
};

/* Raw on-image layout: signature followed by the GID bytes. */
struct pmu_sha1_gid_data {
	u8 signature[PMU_SHA1_GID_SIGNATURE_SIZE];
	u8 gid[PMU_SHA1_GID_SIZE];
};
775
/*
 * Logical queue identifiers (index into pmu_gk20a.queue[]).
 *
 * Fix: dropped the redundant re-#define of PMU_QUEUE_COUNT here — it is
 * already defined (with the same value, 5) next to struct pmu_hdr above;
 * keeping a single definition avoids silent divergence if one is edited.
 */
#define PMU_COMMAND_QUEUE_HPQ		0	/* write by sw, read by pmu, protected by sw mutex lock */
#define PMU_COMMAND_QUEUE_LPQ		1	/* write by sw, read by pmu, protected by sw mutex lock */
#define PMU_COMMAND_QUEUE_BIOS		2	/* read/write by sw/hw, protected by hw pmu mutex, id = 2 */
#define PMU_COMMAND_QUEUE_SMI		3	/* read/write by sw/hw, protected by hw pmu mutex, id = 3 */
#define PMU_MESSAGE_QUEUE		4	/* write by pmu, read by sw, accessed by interrupt handler, no lock */
782
/* Hardware PMU mutex identifiers. */
enum {
	PMU_MUTEX_ID_RSVD1 = 0 ,
	PMU_MUTEX_ID_GPUSER ,
	PMU_MUTEX_ID_QUEUE_BIOS ,
	PMU_MUTEX_ID_QUEUE_SMI ,
	PMU_MUTEX_ID_GPMUTEX ,
	PMU_MUTEX_ID_I2C ,
	PMU_MUTEX_ID_RMLOCK ,
	PMU_MUTEX_ID_MSGBOX ,
	PMU_MUTEX_ID_FIFO ,
	PMU_MUTEX_ID_PG ,
	PMU_MUTEX_ID_GR ,
	PMU_MUTEX_ID_CLK ,
	PMU_MUTEX_ID_RSVD6 ,
	PMU_MUTEX_ID_RSVD7 ,
	PMU_MUTEX_ID_RSVD8 ,
	PMU_MUTEX_ID_RSVD9 ,
	PMU_MUTEX_ID_INVALID
};

/* Queue classification helpers over the PMU_*_QUEUE ids above. */
#define PMU_IS_COMMAND_QUEUE(id)	\
		((id)  < PMU_MESSAGE_QUEUE)

#define PMU_IS_SW_COMMAND_QUEUE(id)	\
		(((id) == PMU_COMMAND_QUEUE_HPQ) || \
		 ((id) == PMU_COMMAND_QUEUE_LPQ))

#define  PMU_IS_MESSAGE_QUEUE(id)	\
		((id) == PMU_MESSAGE_QUEUE)

/* Queue open direction (pmu_queue.oflag). */
enum
{
	OFLAG_READ = 0,
	OFLAG_WRITE
};

#define QUEUE_SET		(true)
#define QUEUE_GET		(false)

#define QUEUE_ALIGNMENT		(4)

/* Indices of the PG engine buffers (pmu_gk20a.pg_buf et al.). */
#define PMU_PGENG_GR_BUFFER_IDX_INIT	(0)
#define PMU_PGENG_GR_BUFFER_IDX_ZBC	(1)
#define PMU_PGENG_GR_BUFFER_IDX_FECS	(2)

/* NOTE(review): duplicates the GK20A_PMU_DMAIDX_* enum above value-for-value;
 * the two should probably be consolidated. */
enum
{
	PMU_DMAIDX_UCODE	= 0,
	PMU_DMAIDX_VIRT		= 1,
	PMU_DMAIDX_PHYS_VID	= 2,
	PMU_DMAIDX_PHYS_SYS_COH	= 3,
	PMU_DMAIDX_PHYS_SYS_NCOH = 4,
	PMU_DMAIDX_RSVD		= 5,
	PMU_DMAIDX_PELPG	= 6,
	PMU_DMAIDX_END		= 7
};
839
struct pmu_gk20a;
/* NOTE(review): this forward declaration is redundant — the full definition
 * follows immediately. */
struct pmu_queue;

/* Host-side state for one PMU command/message queue. */
struct pmu_queue {

	/* used by hw, for BIOS/SMI queue */
	u32 mutex_id;
	u32 mutex_lock;
	/* used by sw, for LPQ/HPQ queue */
	struct mutex mutex;

	/* current write position */
	u32 position;
	/* physical dmem offset where this queue begins */
	u32 offset;
	/* logical queue identifier */
	u32 id;
	/* physical queue index */
	u32 index;
	/* in bytes */
	u32 size;

	/* open-flag */
	u32 oflag;
	bool opened; /* opened implies locked */
	bool locked; /* check free space after setting locked but before setting opened */
};


#define PMU_MUTEX_ID_IS_VALID(id)	\
		((id) < PMU_MUTEX_ID_INVALID)

#define PMU_INVALID_MUTEX_OWNER_ID	(0)

/* Host-side bookkeeping for one hardware PMU mutex. */
struct pmu_mutex {
	u32 id;
	u32 index;
	u32 ref_cnt;
};

#define PMU_MAX_NUM_SEQUENCES		(256)
#define PMU_SEQ_BIT_SHIFT		(5)	/* 32 sequences per bitmap word */
#define PMU_SEQ_TBL_SIZE	\
		(PMU_MAX_NUM_SEQUENCES >> PMU_SEQ_BIT_SHIFT)

#define PMU_INVALID_SEQ_DESC		(~0)

/* Lifecycle of a command/response sequence slot. */
enum
{
	PMU_SEQ_STATE_FREE = 0,
	PMU_SEQ_STATE_PENDING,
	PMU_SEQ_STATE_USED,
	PMU_SEQ_STATE_CANCELLED
};

/* Caller-supplied in/out payload buffers for a command. */
struct pmu_payload {
	struct {
		void *buf;
		u32 offset;
		u32 size;
	} in, out;
};

/* Completion callback invoked when a command's response arrives. */
typedef void (*pmu_callback)(struct gk20a *, struct pmu_msg *, void *, u32,
	u32);

/* One in-flight command/response exchange. */
struct pmu_sequence {
	u8 id;
	u32 state;	/* PMU_SEQ_STATE_* */
	u32 desc;
	struct pmu_msg *msg;
	union {
		struct pmu_allocation_v0 in_v0;
		struct pmu_allocation_v1 in_v1;
	};
	union {
		struct pmu_allocation_v0 out_v0;
		struct pmu_allocation_v1 out_v1;
	};
	u8 *out_payload;
	pmu_callback callback;
	void* cb_params;
};
923
/* Power-gating statistics as reported by the PMU. */
struct pmu_pg_stats {
	u64 pg_entry_start_timestamp;
	u64 pg_ingating_start_timestamp;
	u64 pg_exit_start_timestamp;
	u64 pg_ungating_start_timestamp;
	u32 pg_avg_entry_time_us;
	u32 pg_ingating_cnt;
	u32 pg_ingating_time_us;
	u32 pg_avg_exit_time_us;
	u32 pg_ungating_count;
	u32 pg_ungating_time_us;
	u32 pg_gating_cnt;
	u32 pg_gating_deny_cnt;
};

/* Idle thresholds; the _SIM variants are for simulation platforms. */
#define PMU_PG_IDLE_THRESHOLD_SIM		1000
#define PMU_PG_POST_POWERUP_IDLE_THRESHOLD_SIM	4000000
/* TBD: QT or else ? */
#define PMU_PG_IDLE_THRESHOLD			15000
#define PMU_PG_POST_POWERUP_IDLE_THRESHOLD	1000000

/* state transition :
   OFF => [OFF_ON_PENDING optional] => ON_PENDING => ON => OFF
   ON => OFF is always synchronized */
#define PMU_ELPG_STAT_OFF		0   /* elpg is off */
#define PMU_ELPG_STAT_ON		1   /* elpg is on */
#define PMU_ELPG_STAT_ON_PENDING	2   /* elpg is off, ALLOW cmd has been sent, wait for ack */
#define PMU_ELPG_STAT_OFF_PENDING	3   /* elpg is on, DISALLOW cmd has been sent, wait for ack */
#define PMU_ELPG_STAT_OFF_ON_PENDING	4   /* elpg is off, caller has requested on, but ALLOW
					       cmd hasn't been sent due to ENABLE_ALLOW delay */

/* Falcon Register index */
#define PMU_FALCON_REG_R0		(0)
#define PMU_FALCON_REG_R1		(1)
#define PMU_FALCON_REG_R2		(2)
#define PMU_FALCON_REG_R3		(3)
#define PMU_FALCON_REG_R4		(4)
#define PMU_FALCON_REG_R5		(5)
#define PMU_FALCON_REG_R6		(6)
#define PMU_FALCON_REG_R7		(7)
#define PMU_FALCON_REG_R8		(8)
#define PMU_FALCON_REG_R9		(9)
#define PMU_FALCON_REG_R10		(10)
#define PMU_FALCON_REG_R11		(11)
#define PMU_FALCON_REG_R12		(12)
#define PMU_FALCON_REG_R13		(13)
#define PMU_FALCON_REG_R14		(14)
#define PMU_FALCON_REG_R15		(15)
#define PMU_FALCON_REG_IV0		(16)
#define PMU_FALCON_REG_IV1		(17)
#define PMU_FALCON_REG_UNDEFINED	(18)
#define PMU_FALCON_REG_EV		(19)
#define PMU_FALCON_REG_SP		(20)
#define PMU_FALCON_REG_PC		(21)
#define PMU_FALCON_REG_IMB		(22)
#define PMU_FALCON_REG_DMB		(23)
#define PMU_FALCON_REG_CSW		(24)
#define PMU_FALCON_REG_CCR		(25)
#define PMU_FALCON_REG_SEC		(26)
#define PMU_FALCON_REG_CTX		(27)
#define PMU_FALCON_REG_EXCI		(28)
#define PMU_FALCON_REG_RSVD0		(29)
#define PMU_FALCON_REG_RSVD1		(30)
#define PMU_FALCON_REG_RSVD2		(31)
#define PMU_FALCON_REG_SIZE		(32)
989
/* Top-level driver state for the GK20A PMU. */
struct pmu_gk20a {

	struct gk20a *g;	/* back-pointer to the owning GPU */

	struct pmu_ucode_desc *desc;
	struct pmu_mem_desc ucode;

	struct pmu_mem_desc pg_buf;
	/* TBD: remove this if ZBC seq is fixed */
	struct pmu_mem_desc seq_buf;
	bool buf_loaded;

	struct pmu_sha1_gid gid_info;

	struct pmu_queue queue[PMU_QUEUE_COUNT];

	/* sequence table: seq array + allocation bitmap */
	struct pmu_sequence *seq;
	unsigned long pmu_seq_tbl[PMU_SEQ_TBL_SIZE];
	u32 next_seq_desc;

	struct pmu_mutex *mutex;
	u32 mutex_cnt;

	struct mutex pmu_copy_lock;
	struct mutex pmu_seq_lock;

	struct gk20a_allocator dmem;	/* allocator over the SW-managed DMEM area */

	u32 *ucode_image;
	bool pmu_ready;

	u32 zbc_save_done;

	u32 stat_dmem_offset;

	bool elpg_ready;
	u32 elpg_stat;		/* PMU_ELPG_STAT_* state machine */
	wait_queue_head_t pg_wq;

#define PMU_ELPG_ENABLE_ALLOW_DELAY_MSEC	1 /* msec */
	struct delayed_work elpg_enable; /* deferred elpg enable */
	struct work_struct pg_init;
	bool elpg_enable_allow; /* true after init, false after disable, true after delay */
	struct mutex elpg_mutex; /* protect elpg enable/disable */
	int elpg_refcnt; /* disable -1, enable +1, <=0 elpg disabled, > 0 elpg enabled */

	struct pmu_perfmon_counter perfmon_counter;
	u32 perfmon_state_id[PMU_DOMAIN_GROUP_NUM];

	bool initialized;

	void (*remove_support)(struct pmu_gk20a *pmu);
	bool sw_ready;
	bool perfmon_ready;

	u32 sample_buffer;

	struct mutex isr_mutex;
	bool zbc_ready;
	/* version-selected cmdline args; which member is live presumably
	 * depends on the ucode app_version (APP_VERSION_0/1) — confirm. */
	union {
		struct pmu_cmdline_args_v0 args_v0;
		struct pmu_cmdline_args_v1 args_v1;
	};
};

/* Snapshot of the pmu_gk20a fields preserved across a PMU teardown/re-init. */
struct gk20a_pmu_save_state {
	struct pmu_sequence *seq;
	u32 next_seq_desc;
	struct pmu_mutex *mutex;
	u32 mutex_cnt;
	struct pmu_ucode_desc *desc;
	struct pmu_mem_desc ucode;
	struct pmu_mem_desc seq_buf;
	struct pmu_mem_desc pg_buf;
	struct delayed_work elpg_enable;
	wait_queue_head_t pg_wq;
	bool sw_ready;
	struct work_struct pg_init;
};
1069
/* Public API implemented in pmu_gk20a.c.  Int-returning functions presumably
 * follow kernel convention (0 on success, negative errno) — confirm against
 * the implementation. */
int gk20a_init_pmu_support(struct gk20a *g);
int gk20a_init_pmu_setup_hw2(struct gk20a *g);

void gk20a_pmu_isr(struct gk20a *g);

/* send a cmd to pmu */
int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd, struct pmu_msg *msg,
		struct pmu_payload *payload, u32 queue_id,
		pmu_callback callback, void* cb_param,
		u32 *seq_desc, unsigned long timeout);

int gk20a_pmu_enable_elpg(struct gk20a *g);
int gk20a_pmu_disable_elpg(struct gk20a *g);

void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries);

int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable);

/* token: receives the HW mutex owner token on acquire; pass back on release */
int pmu_mutex_acquire(struct pmu_gk20a *pmu, u32 id, u32 *token);
int pmu_mutex_release(struct pmu_gk20a *pmu, u32 id, u32 *token);
int gk20a_pmu_destroy(struct gk20a *g);
int gk20a_pmu_load_norm(struct gk20a *g, u32 *load);
int gk20a_pmu_debugfs_init(struct platform_device *dev);
void gk20a_pmu_reset_load_counters(struct gk20a *g);
void gk20a_pmu_get_load_counters(struct gk20a *g, u32 *busy_cycles,
		u32 *total_cycles);

#endif /*__PMU_GK20A_H__*/