author     Lakshmanan M <lm@nvidia.com>          2016-09-08 13:28:19 -0400
committer  Deepak Nibade <dnibade@nvidia.com>    2016-12-27 04:56:50 -0500
commit     90f80a282eff04412858361df35c2f88372e88cb (patch)
tree       4de1169e9bc3f02416a01c933175b613f9ccbdfd /drivers/gpu
parent     cb78f5aa749fcea198851ae4adf6e3acd47b37ac (diff)
gpu: nvgpu: Add pmgr support
This CL covers the following implementation:
1) Power Sensor Table parsing.
2) Power Topology Table parsing.
3) Add debugfs interface to get the current power (mW), current (mA) and
   voltage (uV) information from PMU.
4) Power Policy Table parsing.
5) Implement PMU boardobj interface for pmgr module.
6) Over current protection.

JIRA DNVGPU-47

Change-Id: I7b1eefacc4f0a9824ab94ec8dcebefe81b7660d3
Signed-off-by: Lakshmanan M <lm@nvidia.com>
Reviewed-on: http://git-master/r/1217189
(cherry picked from commit ecd0b16316cb4110118c6677f5f03e02921c29b6)
Reviewed-on: http://git-master/r/1241953
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
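For orientation, the change exposes two entry points, pmgr_domain_sw_setup() and pmgr_domain_pmu_setup() (declared in pmgr/pmgr.h below). A minimal sketch of the intended call order during GPU boot follows; the caller shown is hypothetical (the real hook sits in the pstate init path listed in the diffstat) and error handling is reduced to the essentials.

/* Hedged sketch: expected ordering of the new pmgr entry points.
 * example_pmgr_init() is a hypothetical caller, not part of this change.
 */
static u32 example_pmgr_init(struct gk20a *g)
{
	u32 status;

	/* Parse the VBIOS power tables and build SW state (boardobj groups). */
	status = pmgr_domain_sw_setup(g);
	if (status)
		return status;

	/* After the PMU has booted, send the tables and issue the LOAD command. */
	return pmgr_domain_pmu_setup(g);
}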
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/nvgpu/Makefile.nvgpu-t18x      7
-rw-r--r--  drivers/gpu/nvgpu/include/bios.h         156
-rw-r--r--  drivers/gpu/nvgpu/pmgr/pmgr.c            143
-rw-r--r--  drivers/gpu/nvgpu/pmgr/pmgr.h             31
-rw-r--r--  drivers/gpu/nvgpu/pmgr/pmgrpmu.c         524
-rw-r--r--  drivers/gpu/nvgpu/pmgr/pmgrpmu.h          29
-rw-r--r--  drivers/gpu/nvgpu/pmgr/pwrdev.c          310
-rw-r--r--  drivers/gpu/nvgpu/pmgr/pwrdev.h           51
-rw-r--r--  drivers/gpu/nvgpu/pmgr/pwrmonitor.c      365
-rw-r--r--  drivers/gpu/nvgpu/pmgr/pwrmonitor.h       60
-rw-r--r--  drivers/gpu/nvgpu/pmgr/pwrpolicy.c       680
-rw-r--r--  drivers/gpu/nvgpu/pmgr/pwrpolicy.h       117
-rw-r--r--  drivers/gpu/nvgpu/pstate/pstate.c          9
13 files changed, 2480 insertions, 2 deletions
diff --git a/drivers/gpu/nvgpu/Makefile.nvgpu-t18x b/drivers/gpu/nvgpu/Makefile.nvgpu-t18x
index ceae6006..d5162332 100644
--- a/drivers/gpu/nvgpu/Makefile.nvgpu-t18x
+++ b/drivers/gpu/nvgpu/Makefile.nvgpu-t18x
@@ -44,7 +44,12 @@ nvgpu-y += \
44 $(nvgpu-t18x)/gp106/clk_gp106.o \
45 $(nvgpu-t18x)/gp106/gp106_gating_reglist.o \
46 $(nvgpu-t18x)/gp106/therm_gp106.o \
47 $(nvgpu-t18x)/gp106/xve_gp106.o \
48 $(nvgpu-t18x)/pmgr/pwrdev.o \
49 $(nvgpu-t18x)/pmgr/pmgr.o \
50 $(nvgpu-t18x)/pmgr/pmgrpmu.o \
51 $(nvgpu-t18x)/pmgr/pwrmonitor.o \
52 $(nvgpu-t18x)/pmgr/pwrpolicy.o
53
54 nvgpu-$(CONFIG_TEGRA_GK20A) += $(nvgpu-t18x)/gp10b/platform_gp10b_tegra.o
55
diff --git a/drivers/gpu/nvgpu/include/bios.h b/drivers/gpu/nvgpu/include/bios.h
index 83d972e3..d3a677f8 100644
--- a/drivers/gpu/nvgpu/include/bios.h
+++ b/drivers/gpu/nvgpu/include/bios.h
@@ -501,5 +501,159 @@ struct vbios_memory_clock_base_entry_11 {
501#define VBIOS_MEMORY_CLOCK_BASE_ENTRY_12_FLAGS2_CMD_SCRIPT_INDEX_MASK 0x3
502#define VBIOS_MEMORY_CLOCK_BASE_ENTRY_12_FLAGS2_CMD_SCRIPT_INDEX_SHIFT 0
503
504#define VBIOS_POWER_SENSORS_VERSION_2X 0x20
505#define VBIOS_POWER_SENSORS_2X_HEADER_SIZE_08 0x00000008
506
507struct pwr_sensors_2x_header {
508 u8 version;
509 u8 header_size;
510 u8 table_entry_size;
511 u8 num_table_entries;
512 u32 ba_script_pointer;
513};
514
515#define VBIOS_POWER_SENSORS_2X_ENTRY_SIZE_15 0x00000015
516
517struct pwr_sensors_2x_entry {
518 u8 flags0;
519 u32 class_param0;
520 u32 sensor_param0;
521 u32 sensor_param1;
522 u32 sensor_param2;
523 u32 sensor_param3;
524};
525
526#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_FLAGS0_CLASS_MASK 0xF
527#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_FLAGS0_CLASS_SHIFT 0
528#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_FLAGS0_CLASS_I2C 0x00000001
529
530#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_CLASS_PARAM0_I2C_INDEX_MASK 0xFF
531#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_CLASS_PARAM0_I2C_INDEX_SHIFT 0
532#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_CLASS_PARAM0_I2C_USE_FXP8_8_MASK 0x100
533#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_CLASS_PARAM0_I2C_USE_FXP8_8_SHIFT 8
534
535#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM0_INA3221_RSHUNT0_MOHM_MASK 0xFFFF
536#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM0_INA3221_RSHUNT0_MOHM_SHIFT 0
537#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM0_INA3221_RSHUNT1_MOHM_MASK 0xFFFF0000
538#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM0_INA3221_RSHUNT1_MOHM_SHIFT 16
539#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM1_INA3221_RSHUNT2_MOHM_MASK 0xFFFF
540#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM1_INA3221_RSHUNT2_MOHM_SHIFT 0
541#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM1_INA3221_CONFIGURATION_MASK 0xFFFF0000
542#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM1_INA3221_CONFIGURATION_SHIFT 16
543
544#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM2_INA3221_MASKENABLE_MASK 0xFFFF
545#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM2_INA3221_MASKENABLE_SHIFT 0
546#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM2_INA3221_GPIOFUNCTION_MASK 0xFF0000
547#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM2_INA3221_GPIOFUNCTION_SHIFT 16
548#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM3_INA3221_CURR_CORRECT_M_MASK 0xFFFF
549#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM3_INA3221_CURR_CORRECT_M_SHIFT 0
550#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM3_INA3221_CURR_CORRECT_B_MASK 0xFFFF0000
551#define NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM3_INA3221_CURR_CORRECT_B_SHIFT 16
552
553#define VBIOS_POWER_TOPOLOGY_VERSION_2X 0x20
554#define VBIOS_POWER_TOPOLOGY_2X_HEADER_SIZE_06 0x00000006
555
556struct pwr_topology_2x_header {
557 u8 version;
558 u8 header_size;
559 u8 table_entry_size;
560 u8 num_table_entries;
561 u8 rel_entry_size;
562 u8 num_rel_entries;
563};
564
565#define VBIOS_POWER_TOPOLOGY_2X_ENTRY_SIZE_16 0x00000016
566
567struct pwr_topology_2x_entry {
568 u8 flags0;
569 u8 pwr_rail;
570 u32 param0;
571 u32 curr_corr_slope;
572 u32 curr_corr_offset;
573 u32 param1;
574 u32 param2;
575};
576
577#define NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_FLAGS0_CLASS_MASK 0xF
578#define NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_FLAGS0_CLASS_SHIFT 0
579#define NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_FLAGS0_CLASS_SENSOR 0x00000001
580
581#define NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_PARAM1_SENSOR_INDEX_MASK 0xFF
582#define NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_PARAM1_SENSOR_INDEX_SHIFT 0
583#define NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_PARAM1_SENSOR_PROVIDER_INDEX_MASK 0xFF00
584#define NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_PARAM1_SENSOR_PROVIDER_INDEX_SHIFT 8
585
586#define VBIOS_POWER_POLICY_VERSION_3X 0x30
587#define VBIOS_POWER_POLICY_3X_HEADER_SIZE_25 0x00000025
588
589struct pwr_policy_3x_header_struct {
590 u8 version;
591 u8 header_size;
592 u8 table_entry_size;
593 u8 num_table_entries;
594 u16 base_sample_period;
595 u16 min_client_sample_period;
596 u8 table_rel_entry_size;
597 u8 num_table_rel_entries;
598 u8 tgp_policy_idx;
599 u8 rtp_policy_idx;
600 u8 mxm_policy_idx;
601 u8 dnotifier_policy_idx;
602 u32 d2_limit;
603 u32 d3_limit;
604 u32 d4_limit;
605 u32 d5_limit;
606 u8 low_sampling_mult;
607 u8 pwr_tgt_policy_idx;
608 u8 pwr_tgt_floor_policy_idx;
609 u8 sm_bus_policy_idx;
610 u8 table_viol_entry_size;
611 u8 num_table_viol_entries;
612};
613
614#define VBIOS_POWER_POLICY_3X_ENTRY_SIZE_2E 0x0000002E
615
616struct pwr_policy_3x_entry_struct {
617 u8 flags0;
618 u8 ch_idx;
619 u32 limit_min;
620 u32 limit_rated;
621 u32 limit_max;
622 u32 param0;
623 u32 param1;
624 u32 param2;
625 u32 param3;
626 u32 limit_batt;
627 u8 flags1;
628 u8 past_length;
629 u8 next_length;
630 u16 ratio_min;
631 u16 ratio_max;
632 u8 sample_mult;
633 u32 filter_param;
634};
635
636#define NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_CLASS_MASK 0xF
637#define NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_CLASS_SHIFT 0
638#define NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_CLASS_HW_THRESHOLD 0x00000005
639#define NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_LIMIT_UNIT_MASK 0x10
640#define NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_LIMIT_UNIT_SHIFT 4
641
642#define NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS1_FULL_DEFLECTION_LIMIT_MASK 0x1
643#define NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS1_FULL_DEFLECTION_LIMIT_SHIFT 0
644#define NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS1_INTEGRAL_CONTROL_MASK 0x2
645#define NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS1_INTEGRAL_CONTROL_SHIFT 1
646#define NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS1_FILTER_TYPE_MASK 0x3C
647#define NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS1_FILTER_TYPE_SHIFT 2
648
649#define NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_THRES_IDX_MASK 0xFF
650#define NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_THRES_IDX_SHIFT 0
651#define NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_LOW_THRESHOLD_IDX_MASK 0xFF00
652#define NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_LOW_THRESHOLD_IDX_SHIFT 8
653#define NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_LOW_THRESHOLD_USE_MASK 0x10000
654#define NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_LOW_THRESHOLD_USE_SHIFT 16
655
656#define NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM1_HW_THRESHOLD_LOW_THRESHOLD_VAL_MASK 0xFFFF
657#define NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM1_HW_THRESHOLD_LOW_THRESHOLD_VAL_SHIFT 0
658
659#endif
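The MASK/SHIFT pairs added above are consumed through the driver's BIOS_GET_FIELD() helper in the parsers below (pwrdev.c, pwrmonitor.c). A minimal sketch of that decode pattern, assuming the conventional ((value & MASK) >> SHIFT) expansion; the macro shown here is a stand-in, not the driver's own definition:

/* Sketch only: how a power-sensor entry class is decoded from flags0.
 * EXAMPLE_GET_FIELD mirrors the usual mask/shift expansion and is an
 * assumption; the driver uses its own BIOS_GET_FIELD() helper.
 */
#define EXAMPLE_GET_FIELD(value, name) \
	(((value) & name##_MASK) >> name##_SHIFT)

static inline bool example_entry_is_i2c(u8 flags0)
{
	return EXAMPLE_GET_FIELD(flags0,
		NV_VBIOS_POWER_SENSORS_2X_ENTRY_FLAGS0_CLASS) ==
		NV_VBIOS_POWER_SENSORS_2X_ENTRY_FLAGS0_CLASS_I2C;
}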
diff --git a/drivers/gpu/nvgpu/pmgr/pmgr.c b/drivers/gpu/nvgpu/pmgr/pmgr.c
new file mode 100644
index 00000000..f625e37d
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pmgr.c
@@ -0,0 +1,143 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#include "gk20a/gk20a.h"
15#include "pwrdev.h"
16#include "pmgrpmu.h"
17#include <linux/debugfs.h>
18
19#ifdef CONFIG_DEBUG_FS
20static int pmgr_pwr_devices_get_current_power(void *data, u64 *val)
21{
22 struct nv_pmu_pmgr_pwr_devices_query_payload payload;
23 int status;
24 struct gk20a *g = (struct gk20a *)data;
25
26 status = pmgr_pmu_pwr_devices_query_blocking(g, 1, &payload);
27 if (status)
28 gk20a_err(dev_from_gk20a(g),
29 "pmgr_pwr_devices_get_current_power failed %x",
30 status);
31
32 *val = payload.devices[0].powerm_w;
33
34 return status;
35}
36
37static int pmgr_pwr_devices_get_current(void *data, u64 *val)
38{
39 struct nv_pmu_pmgr_pwr_devices_query_payload payload;
40 int status;
41 struct gk20a *g = (struct gk20a *)data;
42
43 status = pmgr_pmu_pwr_devices_query_blocking(g, 1, &payload);
44 if (status)
45 gk20a_err(dev_from_gk20a(g),
46 "pmgr_pwr_devices_get_current failed %x",
47 status);
48
49 *val = payload.devices[0].currentm_a;
50
51 return status;
52}
53
54static int pmgr_pwr_devices_get_current_voltage(void *data, u64 *val)
55{
56 struct nv_pmu_pmgr_pwr_devices_query_payload payload;
57 int status;
58 struct gk20a *g = (struct gk20a *)data;
59
60 status = pmgr_pmu_pwr_devices_query_blocking(g, 1, &payload);
61 if (status)
62 gk20a_err(dev_from_gk20a(g),
63 "pmgr_pwr_devices_get_current_voltage failed %x",
64 status);
65
66 *val = payload.devices[0].voltageu_v;
67
68 return status;
69}
70
71DEFINE_SIMPLE_ATTRIBUTE(
72 pmgr_power_ctrl_fops, pmgr_pwr_devices_get_current_power, NULL, "%llu\n");
73
74DEFINE_SIMPLE_ATTRIBUTE(
75 pmgr_current_ctrl_fops, pmgr_pwr_devices_get_current, NULL, "%llu\n");
76
77DEFINE_SIMPLE_ATTRIBUTE(
78 pmgr_voltage_ctrl_fops, pmgr_pwr_devices_get_current_voltage, NULL, "%llu\n");
79
80static void pmgr_debugfs_init(struct gk20a *g) {
81 struct gk20a_platform *platform = dev_get_drvdata(g->dev);
82 struct dentry *dbgentry;
83
84 dbgentry = debugfs_create_file(
85 "power", S_IRUGO, platform->debugfs, g, &pmgr_power_ctrl_fops);
86 if (!dbgentry)
87 gk20a_err(dev_from_gk20a(g),
88 "debugfs entry create failed for power");
89
90 dbgentry = debugfs_create_file(
91 "current", S_IRUGO, platform->debugfs, g, &pmgr_current_ctrl_fops);
92 if (!dbgentry)
93 gk20a_err(dev_from_gk20a(g),
94 "debugfs entry create failed for current");
95
96 dbgentry = debugfs_create_file(
97 "voltage", S_IRUGO, platform->debugfs, g, &pmgr_voltage_ctrl_fops);
98 if (!dbgentry)
99 gk20a_err(dev_from_gk20a(g),
100 "debugfs entry create failed for voltage");
101}
102#endif
103
104u32 pmgr_domain_sw_setup(struct gk20a *g)
105{
106 u32 status;
107
108 status = pmgr_device_sw_setup(g);
109 if (status) {
110 gk20a_err(dev_from_gk20a(g),
111 "error creating boardobjgrp for pmgr devices, status - 0x%x",
112 status);
113 goto exit;
114 }
115
116 status = pmgr_monitor_sw_setup(g);
117 if (status) {
118 gk20a_err(dev_from_gk20a(g),
119 "error creating boardobjgrp for pmgr monitor, status - 0x%x",
120 status);
121 goto exit;
122 }
123
124 status = pmgr_policy_sw_setup(g);
125 if (status) {
126 gk20a_err(dev_from_gk20a(g),
127 "error creating boardobjgrp for pmgr policy, status - 0x%x",
128 status);
129 goto exit;
130 }
131
132#ifdef CONFIG_DEBUG_FS
133 pmgr_debugfs_init(g);
134#endif
135
136exit:
137 return status;
138}
139
140u32 pmgr_domain_pmu_setup(struct gk20a *g)
141{
142 return pmgr_send_pmgr_tables_to_pmu(g);
143}
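The three debugfs nodes created by pmgr_debugfs_init() above report milliwatts, milliamps and microvolts respectively, and can be read from user space once debugfs is mounted. A hedged usage sketch; the directory component under /sys/kernel/debug depends on the platform's debugfs root passed to debugfs_create_file() and is a placeholder here:

/* Userspace sketch: read the "power" node created above.
 * The "<gpu>" directory name is a placeholder, not the real path.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long power_mw = 0;
	FILE *f = fopen("/sys/kernel/debug/<gpu>/power", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%llu", &power_mw) == 1)
		printf("GPU input power: %llu mW\n", power_mw);
	fclose(f);
	return 0;
}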
diff --git a/drivers/gpu/nvgpu/pmgr/pmgr.h b/drivers/gpu/nvgpu/pmgr/pmgr.h
new file mode 100644
index 00000000..97e7b609
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pmgr.h
@@ -0,0 +1,31 @@
1/*
2 * general power device structures & definitions
3 *
4 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15#ifndef _PMGR_H_
16#define _PMGR_H_
17
18#include "pwrdev.h"
19#include "pwrmonitor.h"
20#include "pwrpolicy.h"
21
22struct pmgr_pmupstate {
23 struct pwr_devices pmgr_deviceobjs;
24 struct pmgr_pwr_monitor pmgr_monitorobjs;
25 struct pmgr_pwr_policy pmgr_policyobjs;
26};
27
28u32 pmgr_domain_sw_setup(struct gk20a *g);
29u32 pmgr_domain_pmu_setup(struct gk20a *g);
30
31#endif
diff --git a/drivers/gpu/nvgpu/pmgr/pmgrpmu.c b/drivers/gpu/nvgpu/pmgr/pmgrpmu.c
new file mode 100644
index 00000000..ea070060
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pmgrpmu.c
@@ -0,0 +1,524 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#include "gk20a/gk20a.h"
15#include "pwrdev.h"
16#include "include/bios.h"
17#include "boardobj/boardobjgrp.h"
18#include "boardobj/boardobjgrp_e32.h"
19#include "pmuif/gpmuifboardobj.h"
20#include "pmuif/gpmuifpmgr.h"
21#include "gm206/bios_gm206.h"
22#include "gk20a/pmu_gk20a.h"
23#include "pmgrpmu.h"
24
25struct pmgr_pmucmdhandler_params {
26 u32 success;
27};
28
29static void pmgr_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg,
30 void *param, u32 handle, u32 status)
31{
32 struct pmgr_pmucmdhandler_params *phandlerparams =
33 (struct pmgr_pmucmdhandler_params *)param;
34
35 if ((msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_SET_OBJECT) &&
36 (msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_QUERY) &&
37 (msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_LOAD)) {
38 gk20a_err(dev_from_gk20a(g),
39 "unknow msg %x",
40 msg->msg.pmgr.msg_type);
41 return;
42 }
43
44 if (msg->msg.pmgr.msg_type == NV_PMU_PMGR_MSG_ID_SET_OBJECT) {
45 if ((msg->msg.pmgr.set_object.b_success != 1) ||
46 (msg->msg.pmgr.set_object.flcnstatus != 0) ) {
47 gk20a_err(dev_from_gk20a(g),
48 "pmgr msg failed %x %x %x %x",
49 msg->msg.pmgr.set_object.msg_type,
50 msg->msg.pmgr.set_object.b_success,
51 msg->msg.pmgr.set_object.flcnstatus,
52 msg->msg.pmgr.set_object.object_type);
53 return;
54 }
55 } else if (msg->msg.pmgr.msg_type == NV_PMU_PMGR_MSG_ID_QUERY) {
56 if ((msg->msg.pmgr.query.b_success != 1) ||
57 (msg->msg.pmgr.query.flcnstatus != 0) ) {
58 gk20a_err(dev_from_gk20a(g),
59 "pmgr msg failed %x %x %x %x",
60 msg->msg.pmgr.query.msg_type,
61 msg->msg.pmgr.query.b_success,
62 msg->msg.pmgr.query.flcnstatus,
63 msg->msg.pmgr.query.cmd_type);
64 return;
65 }
66 } else if (msg->msg.pmgr.msg_type == NV_PMU_PMGR_MSG_ID_LOAD) {
67 if ((msg->msg.pmgr.query.b_success != 1) ||
68 (msg->msg.pmgr.query.flcnstatus != 0) ) {
69 gk20a_err(dev_from_gk20a(g),
70 "pmgr msg failed %x %x %x",
71 msg->msg.pmgr.load.msg_type,
72 msg->msg.pmgr.load.b_success,
73 msg->msg.pmgr.load.flcnstatus);
74 return;
75 }
76 }
77
78 phandlerparams->success = 1;
79}
80
81static u32 pmgr_pmu_set_object(struct gk20a *g,
82 u8 type,
83 u16 dmem_size,
84 u16 fb_size,
85 void *pobj)
86{
87 struct pmu_cmd cmd = { {0} };
88 struct pmu_payload payload = { {0} };
89 struct nv_pmu_pmgr_cmd_set_object *pcmd;
90 u32 status;
91 u32 seqdesc;
92 struct pmgr_pmucmdhandler_params handlerparams = {0};
93
94 cmd.hdr.unit_id = PMU_UNIT_PMGR;
95 cmd.hdr.size = (u32)sizeof(struct nv_pmu_pmgr_cmd_set_object) +
96 		(u32)sizeof(struct pmu_hdr);
97
98 pcmd = &cmd.cmd.pmgr.set_object;
99 pcmd->cmd_type = NV_PMU_PMGR_CMD_ID_SET_OBJECT;
100 pcmd->object_type = type;
101
102 payload.in.buf = pobj;
103 payload.in.size = dmem_size;
104 payload.in.fb_size = fb_size;
105 payload.in.offset = NV_PMU_PMGR_SET_OBJECT_ALLOC_OFFSET;
106
107 /* Setup the handler params to communicate back results.*/
108 handlerparams.success = 0;
109
110 status = gk20a_pmu_cmd_post(g, &cmd, NULL, &payload,
111 PMU_COMMAND_QUEUE_LPQ,
112 pmgr_pmucmdhandler,
113 (void *)&handlerparams,
114 &seqdesc, ~0);
115 if (status) {
116 gk20a_err(dev_from_gk20a(g),
117 "unable to post pmgr cmd for unit %x cmd id %x obj type %x",
118 cmd.hdr.unit_id, pcmd->cmd_type, pcmd->object_type);
119 goto exit;
120 }
121
122 pmu_wait_message_cond(&g->pmu,
123 gk20a_get_gr_idle_timeout(g),
124 &handlerparams.success, 1);
125
126 if (handlerparams.success == 0) {
127 gk20a_err(dev_from_gk20a(g), "could not process cmd\n");
128 status = -ETIMEDOUT;
129 goto exit;
130 }
131
132exit:
133 return status;
134}
135
136static u32 pmgr_send_i2c_device_topology_to_pmu(struct gk20a *g)
137{
138 struct nv_pmu_pmgr_i2c_device_desc_table i2c_desc_table;
139 u32 status = 0;
140
141 /* INA3221 I2C device info */
142 i2c_desc_table.dev_mask = 0x01;
143
144 /* INA3221 */
145 i2c_desc_table.devices[0].super.type = 0x4E;
146
147 i2c_desc_table.devices[0].dcb_index = 0;
148 i2c_desc_table.devices[0].i2c_address = 0x84;
149 i2c_desc_table.devices[0].i2c_flags = 0xC2F;
150 i2c_desc_table.devices[0].i2c_port = 0x2;
151
152 /* Pass the table down the PMU as an object */
153 status = pmgr_pmu_set_object(
154 g,
155 NV_PMU_PMGR_OBJECT_I2C_DEVICE_DESC_TABLE,
156 (u16)sizeof(struct nv_pmu_pmgr_i2c_device_desc_table),
157 PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED,
158 &i2c_desc_table);
159
160 if (status)
161 gk20a_err(dev_from_gk20a(g),
162 "pmgr_pmu_set_object failed %x",
163 status);
164
165 return status;
166}
167
168static u32 pmgr_send_pwr_device_topology_to_pmu(struct gk20a *g)
169{
170 struct nv_pmu_pmgr_pwr_device_desc_table pwr_desc_table;
171 struct nv_pmu_pmgr_pwr_device_desc_table_header *ppwr_desc_header;
172 u32 status = 0;
173
174 /* Set the BA-device-independent HW information */
175 ppwr_desc_header = &(pwr_desc_table.hdr.data);
176 ppwr_desc_header->ba_info.b_initialized_and_used = false;
177
178 /* populate the table */
179 boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)&ppwr_desc_header->super,
180 g->pmgr_pmu.pmgr_deviceobjs.super.super.objmask);
181
182 status = boardobjgrp_pmudatainit_legacy(g,
183 &g->pmgr_pmu.pmgr_deviceobjs.super.super,
184 (struct nv_pmu_boardobjgrp_super *)&pwr_desc_table);
185
186 if (status) {
187 gk20a_err(dev_from_gk20a(g),
188 "boardobjgrp_pmudatainit_legacy failed %x",
189 status);
190 goto exit;
191 }
192
193 /* Pass the table down the PMU as an object */
194 status = pmgr_pmu_set_object(
195 g,
196 NV_PMU_PMGR_OBJECT_PWR_DEVICE_DESC_TABLE,
197 (u16)sizeof(
198 union nv_pmu_pmgr_pwr_device_dmem_size),
199 (u16)sizeof(struct nv_pmu_pmgr_pwr_device_desc_table),
200 &pwr_desc_table);
201
202 if (status)
203 gk20a_err(dev_from_gk20a(g),
204 "pmgr_pmu_set_object failed %x",
205 status);
206
207exit:
208 return status;
209}
210
211static u32 pmgr_send_pwr_mointer_to_pmu(struct gk20a *g)
212{
213 struct nv_pmu_pmgr_pwr_monitor_pack pwr_monitor_pack;
214 struct nv_pmu_pmgr_pwr_channel_header *pwr_channel_hdr;
215 struct nv_pmu_pmgr_pwr_chrelationship_header *pwr_chrelationship_header;
216 u32 max_dmem_size;
217 u32 status = 0;
218
219 /* Copy all the global settings from the RM copy */
220 pwr_channel_hdr = &(pwr_monitor_pack.channels.hdr.data);
221 pwr_monitor_pack = g->pmgr_pmu.pmgr_monitorobjs.pmu_data;
222
223 boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)&pwr_channel_hdr->super,
224 g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super.objmask);
225
226 /* Copy in each channel */
227 status = boardobjgrp_pmudatainit_legacy(g,
228 &g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super,
229 (struct nv_pmu_boardobjgrp_super *)&(pwr_monitor_pack.channels));
230
231 if (status) {
232 gk20a_err(dev_from_gk20a(g),
233 "boardobjgrp_pmudatainit_legacy failed %x",
234 status);
235 goto exit;
236 }
237
238 /* Copy in each channel relationship */
239 pwr_chrelationship_header = &(pwr_monitor_pack.ch_rels.hdr.data);
240
241 boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)&pwr_chrelationship_header->super,
242 g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super.objmask);
243
244 pwr_channel_hdr->physical_channel_mask = g->pmgr_pmu.pmgr_monitorobjs.physical_channel_mask;
245 pwr_channel_hdr->type = NV_PMU_PMGR_PWR_MONITOR_TYPE_NO_POLLING;
246
247 status = boardobjgrp_pmudatainit_legacy(g,
248 &g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super,
249 (struct nv_pmu_boardobjgrp_super *)&(pwr_monitor_pack.ch_rels));
250
251 if (status) {
252 gk20a_err(dev_from_gk20a(g),
253 "boardobjgrp_pmudatainit_legacy failed %x",
254 status);
255 goto exit;
256 }
257
258 /* Calculate the max Dmem buffer size */
259 max_dmem_size = sizeof(union nv_pmu_pmgr_pwr_monitor_dmem_size);
260
261 /* Pass the table down the PMU as an object */
262 status = pmgr_pmu_set_object(
263 g,
264 NV_PMU_PMGR_OBJECT_PWR_MONITOR,
265 (u16)max_dmem_size,
266 (u16)sizeof(struct nv_pmu_pmgr_pwr_monitor_pack),
267 &pwr_monitor_pack);
268
269 if (status)
270 gk20a_err(dev_from_gk20a(g),
271 "pmgr_pmu_set_object failed %x",
272 status);
273
274exit:
275 return status;
276}
277
278u32 pmgr_send_pwr_policy_to_pmu(struct gk20a *g)
279{
280 struct nv_pmu_pmgr_pwr_policy_pack *ppwrpack = NULL;
281 struct pwr_policy *ppolicy = NULL;
282 u32 status = 0;
283 u8 indx;
284 u32 max_dmem_size;
285
286 ppwrpack = kzalloc(sizeof(struct nv_pmu_pmgr_pwr_policy_pack), GFP_KERNEL);
287 if (!ppwrpack) {
288 gk20a_err(dev_from_gk20a(g),
289 "pwr policy alloc failed %x",
290 status);
291 status = -ENOMEM;
292 goto exit;
293 }
294
295 ppwrpack->policies.hdr.data.version = g->pmgr_pmu.pmgr_policyobjs.version;
296 ppwrpack->policies.hdr.data.b_enabled = g->pmgr_pmu.pmgr_policyobjs.b_enabled;
297
298 boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)
299 &ppwrpack->policies.hdr.data.super,
300 g->pmgr_pmu.pmgr_policyobjs.pwr_policies.super.objmask);
301
302 memset(&ppwrpack->policies.hdr.data.reserved_pmu_policy_mask,
303 0,
304 sizeof(ppwrpack->policies.hdr.data.reserved_pmu_policy_mask));
305
306 ppwrpack->policies.hdr.data.base_sample_period =
307 g->pmgr_pmu.pmgr_policyobjs.base_sample_period;
308 ppwrpack->policies.hdr.data.min_client_sample_period =
309 g->pmgr_pmu.pmgr_policyobjs.min_client_sample_period;
310 ppwrpack->policies.hdr.data.low_sampling_mult =
311 g->pmgr_pmu.pmgr_policyobjs.low_sampling_mult;
312
313 memcpy(&ppwrpack->policies.hdr.data.global_ceiling,
314 &g->pmgr_pmu.pmgr_policyobjs.global_ceiling,
315 sizeof(struct nv_pmu_perf_domain_group_limits));
316
317 memcpy(&ppwrpack->policies.hdr.data.semantic_policy_tbl,
318 &g->pmgr_pmu.pmgr_policyobjs.policy_idxs,
319 sizeof(g->pmgr_pmu.pmgr_policyobjs.policy_idxs));
320
321 BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK(32, indx,
322 ppwrpack->policies.hdr.data.super.obj_mask.super.data[0]) {
323 ppolicy = PMGR_GET_PWR_POLICY(g, indx);
324
325 status = ((struct boardobj *)ppolicy)->pmudatainit(g, (struct boardobj *)ppolicy,
326 (struct nv_pmu_boardobj *)&(ppwrpack->policies.policies[indx].data));
327 if (status) {
328 gk20a_err(dev_from_gk20a(g),
329 "pmudatainit failed %x indx %x",
330 status, indx);
331 status = -ENOMEM;
332 goto exit;
333 }
334 }
335 BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK_END;
336
337 boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)
338 &ppwrpack->policy_rels.hdr.data.super,
339 g->pmgr_pmu.pmgr_policyobjs.pwr_policy_rels.super.objmask);
340
341 boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)
342 &ppwrpack->violations.hdr.data.super,
343 g->pmgr_pmu.pmgr_policyobjs.pwr_violations.super.objmask);
344
345 max_dmem_size = sizeof(union nv_pmu_pmgr_pwr_policy_dmem_size);
346
347 /* Pass the table down the PMU as an object */
348 status = pmgr_pmu_set_object(
349 g,
350 NV_PMU_PMGR_OBJECT_PWR_POLICY,
351 (u16)max_dmem_size,
352 (u16)sizeof(struct nv_pmu_pmgr_pwr_policy_pack),
353 ppwrpack);
354
355 if (status)
356 gk20a_err(dev_from_gk20a(g),
357 "pmgr_pmu_set_object failed %x",
358 status);
359
360exit:
361 if (ppwrpack) {
362 kfree(ppwrpack);
363 }
364
365 return status;
366}
367
368u32 pmgr_pmu_pwr_devices_query_blocking(
369 struct gk20a *g,
370 u32 pwr_dev_mask,
371 struct nv_pmu_pmgr_pwr_devices_query_payload *ppayload)
372{
373 struct pmu_cmd cmd = { {0} };
374 struct pmu_payload payload = { {0} };
375 struct nv_pmu_pmgr_cmd_pwr_devices_query *pcmd;
376 u32 status;
377 u32 seqdesc;
378 struct pmgr_pmucmdhandler_params handlerparams = {0};
379
380 cmd.hdr.unit_id = PMU_UNIT_PMGR;
381 cmd.hdr.size = (u32)sizeof(struct nv_pmu_pmgr_cmd_pwr_devices_query) +
382 (u32)sizeof(struct pmu_hdr);
383
384 pcmd = &cmd.cmd.pmgr.pwr_dev_query;
385 pcmd->cmd_type = NV_PMU_PMGR_CMD_ID_PWR_DEVICES_QUERY;
386 pcmd->dev_mask = pwr_dev_mask;
387
388 payload.out.buf = ppayload;
389 payload.out.size = sizeof(struct nv_pmu_pmgr_pwr_devices_query_payload);
390 payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
391 payload.out.offset = NV_PMU_PMGR_PWR_DEVICES_QUERY_ALLOC_OFFSET;
392
393 /* Setup the handler params to communicate back results.*/
394 handlerparams.success = 0;
395
396 status = gk20a_pmu_cmd_post(g, &cmd, NULL, &payload,
397 PMU_COMMAND_QUEUE_LPQ,
398 pmgr_pmucmdhandler,
399 (void *)&handlerparams,
400 &seqdesc, ~0);
401 if (status) {
402 gk20a_err(dev_from_gk20a(g),
403 "unable to post pmgr query cmd for unit %x cmd id %x dev mask %x",
404 cmd.hdr.unit_id, pcmd->cmd_type, pcmd->dev_mask);
405 goto exit;
406 }
407
408 pmu_wait_message_cond(&g->pmu,
409 gk20a_get_gr_idle_timeout(g),
410 &handlerparams.success, 1);
411
412 if (handlerparams.success == 0) {
413 gk20a_err(dev_from_gk20a(g), "could not process cmd\n");
414 status = -ETIMEDOUT;
415 goto exit;
416 }
417
418exit:
419 return status;
420}
421
422static u32 pmgr_pmu_load_blocking(struct gk20a *g)
423{
424 struct pmu_cmd cmd = { {0} };
425 struct nv_pmu_pmgr_cmd_load *pcmd;
426 u32 status;
427 u32 seqdesc;
428 struct pmgr_pmucmdhandler_params handlerparams = {0};
429
430 cmd.hdr.unit_id = PMU_UNIT_PMGR;
431 cmd.hdr.size = (u32)sizeof(struct nv_pmu_pmgr_cmd_load) +
432 (u32)sizeof(struct pmu_hdr);
433
434 pcmd = &cmd.cmd.pmgr.load;
435 pcmd->cmd_type = NV_PMU_PMGR_CMD_ID_LOAD;
436
437 /* Setup the handler params to communicate back results.*/
438 handlerparams.success = 0;
439
440 status = gk20a_pmu_cmd_post(g, &cmd, NULL, NULL,
441 PMU_COMMAND_QUEUE_LPQ,
442 pmgr_pmucmdhandler,
443 (void *)&handlerparams,
444 &seqdesc, ~0);
445 if (status) {
446 gk20a_err(dev_from_gk20a(g),
447 "unable to post pmgr load cmd for unit %x cmd id %x",
448 cmd.hdr.unit_id, pcmd->cmd_type);
449 goto exit;
450 }
451
452 pmu_wait_message_cond(&g->pmu,
453 gk20a_get_gr_idle_timeout(g),
454 &handlerparams.success, 1);
455
456 if (handlerparams.success == 0) {
457 gk20a_err(dev_from_gk20a(g), "could not process cmd\n");
458 status = -ETIMEDOUT;
459 goto exit;
460 }
461
462exit:
463 return status;
464}
465
466u32 pmgr_send_pmgr_tables_to_pmu(struct gk20a *g)
467{
468 u32 status = 0;
469
470 status = pmgr_send_i2c_device_topology_to_pmu(g);
471
472 if (status) {
473 gk20a_err(dev_from_gk20a(g),
474 "pmgr_send_i2c_device_topology_to_pmu failed %x",
475 status);
476 goto exit;
477 }
478
479 if (!BOARDOBJGRP_IS_EMPTY(&g->pmgr_pmu.pmgr_deviceobjs.super.super)) {
480 status = pmgr_send_pwr_device_topology_to_pmu(g);
481 if (status) {
482 gk20a_err(dev_from_gk20a(g),
483 "pmgr_send_pwr_device_topology_to_pmu failed %x",
484 status);
485 goto exit;
486 }
487 }
488
489 if (!(BOARDOBJGRP_IS_EMPTY(
490 &g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super)) ||
491 !(BOARDOBJGRP_IS_EMPTY(
492 &g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super))) {
493 status = pmgr_send_pwr_mointer_to_pmu(g);
494 if (status) {
495 gk20a_err(dev_from_gk20a(g),
496 "pmgr_send_pwr_mointer_to_pmu failed %x", status);
497 goto exit;
498 }
499 }
500
501 if (!(BOARDOBJGRP_IS_EMPTY(
502 &g->pmgr_pmu.pmgr_policyobjs.pwr_policies.super)) ||
503 !(BOARDOBJGRP_IS_EMPTY(
504 &g->pmgr_pmu.pmgr_policyobjs.pwr_policy_rels.super)) ||
505 !(BOARDOBJGRP_IS_EMPTY(
506 &g->pmgr_pmu.pmgr_policyobjs.pwr_violations.super))) {
507 status = pmgr_send_pwr_policy_to_pmu(g);
508 if (status) {
509 gk20a_err(dev_from_gk20a(g),
510 "pmgr_send_pwr_policy_to_pmu failed %x", status);
511 goto exit;
512 }
513 }
514
515 status = pmgr_pmu_load_blocking(g);
516 if (status) {
517 gk20a_err(dev_from_gk20a(g),
518 			"pmgr_pmu_load_blocking failed %x", status);
519 goto exit;
520 }
521
522exit:
523 return status;
524}
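pmgr_pmu_pwr_devices_query_blocking() follows the same post-and-wait pattern as the other commands above: gk20a_pmu_cmd_post() queues the command and pmu_wait_message_cond() blocks until the handler sets handlerparams.success. A hedged sketch of a caller querying by device mask; how payload.devices[] is indexed for masks wider than bit 0 is an assumption, since this change only ever queries device 0:

/* Sketch: query the power devices selected by dev_mask and log the results.
 * Indexing payload.devices[] by raw device index is assumed here.
 */
static u32 example_query_devices(struct gk20a *g, u32 dev_mask)
{
	struct nv_pmu_pmgr_pwr_devices_query_payload payload;
	u32 status;
	u32 idx;

	status = pmgr_pmu_pwr_devices_query_blocking(g, dev_mask, &payload);
	if (status)
		return status;

	for (idx = 0; idx < 32; idx++) {
		if ((dev_mask & BIT(idx)) == 0)
			continue;
		gk20a_dbg_info("dev %u: %u mW %u mA %u uV", idx,
			payload.devices[idx].powerm_w,
			payload.devices[idx].currentm_a,
			payload.devices[idx].voltageu_v);
	}

	return 0;
}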
diff --git a/drivers/gpu/nvgpu/pmgr/pmgrpmu.h b/drivers/gpu/nvgpu/pmgr/pmgrpmu.h
new file mode 100644
index 00000000..6b48396c
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pmgrpmu.h
@@ -0,0 +1,29 @@
1/*
2 * general power device control structures & definitions
3 *
4 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15#ifndef _PMGRPMU_H_
16#define _PMGRPMU_H_
17
18#include "gk20a/gk20a.h"
19#include "pwrdev.h"
20#include "pwrmonitor.h"
21
22u32 pmgr_send_pmgr_tables_to_pmu(struct gk20a *g);
23
24u32 pmgr_pmu_pwr_devices_query_blocking(
25 struct gk20a *g,
26 u32 pwr_dev_mask,
27 struct nv_pmu_pmgr_pwr_devices_query_payload *ppayload);
28
29#endif
diff --git a/drivers/gpu/nvgpu/pmgr/pwrdev.c b/drivers/gpu/nvgpu/pmgr/pwrdev.c
new file mode 100644
index 00000000..03e2eb34
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pwrdev.c
@@ -0,0 +1,310 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#include "gk20a/gk20a.h"
15#include "pwrdev.h"
16#include "include/bios.h"
17#include "boardobj/boardobjgrp.h"
18#include "boardobj/boardobjgrp_e32.h"
19#include "pmuif/gpmuifboardobj.h"
20#include "pmuif/gpmuifpmgr.h"
21#include "gm206/bios_gm206.h"
22#include "gk20a/pmu_gk20a.h"
23
24static u32 _pwr_device_pmudata_instget(struct gk20a *g,
25 struct nv_pmu_boardobjgrp *pmuboardobjgrp,
26 struct nv_pmu_boardobj **ppboardobjpmudata,
27 u8 idx)
28{
29 struct nv_pmu_pmgr_pwr_device_desc_table *ppmgrdevice =
30 (struct nv_pmu_pmgr_pwr_device_desc_table *)pmuboardobjgrp;
31
32 gk20a_dbg_info("");
33
34 /*check whether pmuboardobjgrp has a valid boardobj in index*/
35 if (((u32)BIT(idx) &
36 ppmgrdevice->hdr.data.super.obj_mask.super.data[0]) == 0)
37 return -EINVAL;
38
39 *ppboardobjpmudata = (struct nv_pmu_boardobj *)
40 &ppmgrdevice->devices[idx].data.board_obj;
41
42 gk20a_dbg_info(" Done");
43
44 return 0;
45}
46
47static u32 _pwr_domains_pmudatainit_ina3221(struct gk20a *g,
48 struct boardobj *board_obj_ptr,
49 struct nv_pmu_boardobj *ppmudata)
50{
51 struct nv_pmu_pmgr_pwr_device_desc_ina3221 *ina3221_desc;
52 struct pwr_device_ina3221 *ina3221;
53 u32 status = 0;
54 u32 indx;
55
56 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata);
57 if (status) {
58 gk20a_err(dev_from_gk20a(g),
59 "error updating pmu boardobjgrp for pwr domain 0x%x",
60 status);
61 goto done;
62 }
63
64 ina3221 = (struct pwr_device_ina3221 *)board_obj_ptr;
65 ina3221_desc = (struct nv_pmu_pmgr_pwr_device_desc_ina3221 *) ppmudata;
66
67 ina3221_desc->super.power_corr_factor = ina3221->super.power_corr_factor;
68 ina3221_desc->i2c_dev_idx = ina3221->super.i2c_dev_idx;
69 ina3221_desc->configuration = ina3221->configuration;
70 ina3221_desc->mask_enable = ina3221->mask_enable;
71 /* configure NV_PMU_THERM_EVENT_EXT_OVERT */
72 ina3221_desc->event_mask = (1 << 0);
73 ina3221_desc->curr_correct_m = ina3221->curr_correct_m;
74 ina3221_desc->curr_correct_b = ina3221->curr_correct_b;
75
76 for (indx = 0; indx < NV_PMU_PMGR_PWR_DEVICE_INA3221_CH_NUM; indx++) {
77 ina3221_desc->r_shuntm_ohm[indx] = ina3221->r_shuntm_ohm[indx];
78 }
79
80done:
81 return status;
82}
83
84static struct boardobj *construct_pwr_device(struct gk20a *g,
85 void *pargs, u16 pargs_size, u8 type)
86{
87 struct boardobj *board_obj_ptr = NULL;
88 u32 status;
89 u32 indx;
90 struct pwr_device_ina3221 *pwrdev;
91 struct pwr_device_ina3221 *ina3221 = (struct pwr_device_ina3221*)pargs;
92
93 status = boardobj_construct_super(g, &board_obj_ptr,
94 pargs_size, pargs);
95 if (status)
96 return NULL;
97
98 pwrdev = (struct pwr_device_ina3221*)board_obj_ptr;
99
100 /* Set Super class interfaces */
101 board_obj_ptr->pmudatainit = _pwr_domains_pmudatainit_ina3221;
102 pwrdev->super.power_rail = ina3221->super.power_rail;
103 pwrdev->super.i2c_dev_idx = ina3221->super.i2c_dev_idx;
104 pwrdev->super.power_corr_factor = (1 << 12);
105 pwrdev->super.bIs_inforom_config = false;
106
107 /* Set INA3221-specific information */
108 pwrdev->configuration = ina3221->configuration;
109 pwrdev->mask_enable = ina3221->mask_enable;
110 pwrdev->gpio_function = ina3221->gpio_function;
111 pwrdev->curr_correct_m = ina3221->curr_correct_m;
112 pwrdev->curr_correct_b = ina3221->curr_correct_b;
113
114 for (indx = 0; indx < NV_PMU_PMGR_PWR_DEVICE_INA3221_CH_NUM; indx++) {
115 pwrdev->r_shuntm_ohm[indx] = ina3221->r_shuntm_ohm[indx];
116 }
117
118 gk20a_dbg_info(" Done");
119
120 return board_obj_ptr;
121}
122
123static u32 devinit_get_pwr_device_table(struct gk20a *g,
124 struct pwr_devices *ppwrdeviceobjs)
125{
126 u32 status = 0;
127 u8 *pwr_device_table_ptr = NULL;
128 u8 *curr_pwr_device_table_ptr = NULL;
129 struct boardobj *boardobj;
130 struct pwr_sensors_2x_header pwr_sensor_table_header = { 0 };
131 struct pwr_sensors_2x_entry pwr_sensor_table_entry = { 0 };
132 u32 index;
133 u32 obj_index = 0;
134 u16 pwr_device_size;
135 union {
136 struct boardobj boardobj;
137 struct pwr_device pwrdev;
138 struct pwr_device_ina3221 ina3221;
139 } pwr_device_data;
140
141 gk20a_dbg_info("");
142
143 if (g->ops.bios.get_perf_table_ptrs != NULL) {
144 pwr_device_table_ptr = (u8 *)g->ops.bios.get_perf_table_ptrs(g,
145 g->bios.perf_token, POWER_SENSORS_TABLE);
146 if (pwr_device_table_ptr == NULL) {
147 status = -EINVAL;
148 goto done;
149 }
150 }
151
152 memcpy(&pwr_sensor_table_header, pwr_device_table_ptr,
153 VBIOS_POWER_SENSORS_2X_HEADER_SIZE_08);
154
155 if (pwr_sensor_table_header.version !=
156 VBIOS_POWER_SENSORS_VERSION_2X) {
157 status = -EINVAL;
158 goto done;
159 }
160
161 if (pwr_sensor_table_header.header_size <
162 VBIOS_POWER_SENSORS_2X_HEADER_SIZE_08) {
163 status = -EINVAL;
164 goto done;
165 }
166
167 if (pwr_sensor_table_header.table_entry_size !=
168 VBIOS_POWER_SENSORS_2X_ENTRY_SIZE_15) {
169 status = -EINVAL;
170 goto done;
171 }
172
173 curr_pwr_device_table_ptr = (pwr_device_table_ptr +
174 VBIOS_POWER_SENSORS_2X_HEADER_SIZE_08);
175
176 for (index = 0; index < pwr_sensor_table_header.num_table_entries; index++) {
177 bool use_fxp8_8 = false;
178 u8 i2c_dev_idx;
179 u8 device_type;
180
181 curr_pwr_device_table_ptr += (pwr_sensor_table_header.table_entry_size * index);
182
183 pwr_sensor_table_entry.flags0 = *curr_pwr_device_table_ptr;
184
185 memcpy(&pwr_sensor_table_entry.class_param0,
186 (curr_pwr_device_table_ptr + 1),
187 (VBIOS_POWER_SENSORS_2X_ENTRY_SIZE_15 - 1));
188
189 device_type = (u8)BIOS_GET_FIELD(
190 pwr_sensor_table_entry.flags0,
191 NV_VBIOS_POWER_SENSORS_2X_ENTRY_FLAGS0_CLASS);
192
193 if (device_type == NV_VBIOS_POWER_SENSORS_2X_ENTRY_FLAGS0_CLASS_I2C) {
194 i2c_dev_idx = (u8)BIOS_GET_FIELD(
195 pwr_sensor_table_entry.class_param0,
196 NV_VBIOS_POWER_SENSORS_2X_ENTRY_CLASS_PARAM0_I2C_INDEX);
197 use_fxp8_8 = (u8)BIOS_GET_FIELD(
198 pwr_sensor_table_entry.class_param0,
199 NV_VBIOS_POWER_SENSORS_2X_ENTRY_CLASS_PARAM0_I2C_USE_FXP8_8);
200
201 pwr_device_data.ina3221.super.i2c_dev_idx = i2c_dev_idx;
202 pwr_device_data.ina3221.r_shuntm_ohm[0].use_fxp8_8 = use_fxp8_8;
203 pwr_device_data.ina3221.r_shuntm_ohm[1].use_fxp8_8 = use_fxp8_8;
204 pwr_device_data.ina3221.r_shuntm_ohm[2].use_fxp8_8 = use_fxp8_8;
205 pwr_device_data.ina3221.r_shuntm_ohm[0].rshunt_value =
206 (u16)BIOS_GET_FIELD(
207 pwr_sensor_table_entry.sensor_param0,
208 NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM0_INA3221_RSHUNT0_MOHM);
209
210 pwr_device_data.ina3221.r_shuntm_ohm[1].rshunt_value =
211 (u16)BIOS_GET_FIELD(
212 pwr_sensor_table_entry.sensor_param0,
213 NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM0_INA3221_RSHUNT1_MOHM);
214
215 pwr_device_data.ina3221.r_shuntm_ohm[2].rshunt_value =
216 (u16)BIOS_GET_FIELD(
217 pwr_sensor_table_entry.sensor_param1,
218 NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM1_INA3221_RSHUNT2_MOHM);
219 pwr_device_data.ina3221.configuration =
220 (u16)BIOS_GET_FIELD(
221 pwr_sensor_table_entry.sensor_param1,
222 NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM1_INA3221_CONFIGURATION);
223
224 pwr_device_data.ina3221.mask_enable =
225 (u16)BIOS_GET_FIELD(
226 pwr_sensor_table_entry.sensor_param2,
227 NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM2_INA3221_MASKENABLE);
228
229 pwr_device_data.ina3221.gpio_function =
230 (u8)BIOS_GET_FIELD(
231 pwr_sensor_table_entry.sensor_param2,
232 NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM2_INA3221_GPIOFUNCTION);
233
234 pwr_device_data.ina3221.curr_correct_m =
235 (u16)BIOS_GET_FIELD(
236 pwr_sensor_table_entry.sensor_param3,
237 NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM3_INA3221_CURR_CORRECT_M);
238
239 pwr_device_data.ina3221.curr_correct_b =
240 (u16)BIOS_GET_FIELD(
241 pwr_sensor_table_entry.sensor_param3,
242 NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM3_INA3221_CURR_CORRECT_B);
243
244 if (!pwr_device_data.ina3221.curr_correct_m) {
245 pwr_device_data.ina3221.curr_correct_m = (1 << 12);
246 }
247 pwr_device_size = sizeof(struct pwr_device_ina3221);
248 } else
249 continue;
250
251 pwr_device_data.boardobj.type = CTRL_PMGR_PWR_DEVICE_TYPE_INA3221;
252 pwr_device_data.pwrdev.power_rail = (u8)0;
253
254 boardobj = construct_pwr_device(g, &pwr_device_data,
255 pwr_device_size, pwr_device_data.boardobj.type);
256
257 if (!boardobj) {
258 gk20a_err(dev_from_gk20a(g),
259 "unable to create pwr device for %d type %d", index, pwr_device_data.boardobj.type);
260 status = -EINVAL;
261 goto done;
262 }
263
264 status = boardobjgrp_objinsert(&ppwrdeviceobjs->super.super,
265 boardobj, obj_index);
266
267 if (status) {
268 gk20a_err(dev_from_gk20a(g),
269 "unable to insert pwr device boardobj for %d", index);
270 status = -EINVAL;
271 goto done;
272 }
273
274 ++obj_index;
275 }
276
277done:
278 gk20a_dbg_info(" done status %x", status);
279 return status;
280}
281
282u32 pmgr_device_sw_setup(struct gk20a *g)
283{
284 u32 status;
285 struct boardobjgrp *pboardobjgrp = NULL;
286 struct pwr_devices *ppwrdeviceobjs;
287
288 /* Construct the Super Class and override the Interfaces */
289 status = boardobjgrpconstruct_e32(&g->pmgr_pmu.pmgr_deviceobjs.super);
290 if (status) {
291 gk20a_err(dev_from_gk20a(g),
292 "error creating boardobjgrp for pmgr devices, status - 0x%x",
293 status);
294 goto done;
295 }
296
297 pboardobjgrp = &g->pmgr_pmu.pmgr_deviceobjs.super.super;
298 ppwrdeviceobjs = &(g->pmgr_pmu.pmgr_deviceobjs);
299
300 /* Override the Interfaces */
301 pboardobjgrp->pmudatainstget = _pwr_device_pmudata_instget;
302
303 status = devinit_get_pwr_device_table(g, ppwrdeviceobjs);
304 if (status)
305 goto done;
306
307done:
308 gk20a_dbg_info(" done status %x", status);
309 return status;
310}
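devinit_get_pwr_device_table() above defaults curr_correct_m to (1 << 12) when the VBIOS leaves it zero, i.e. a unity slope in FXP4.12 fixed point, with curr_correct_b as an additive offset. A hedged sketch of how such a slope/offset pair would be applied to a raw current sample; the exact fixed-point convention, and the fact that the PMU (not the kernel) performs this correction, are assumptions:

/* Sketch: apply an FXP4.12 slope and an integer offset to a raw reading.
 * With curr_correct_m == (1 << 12) and curr_correct_b == 0 this is an
 * identity transform.
 */
static inline s32 example_correct_current_ma(s32 raw_ma,
		u16 curr_correct_m, s16 curr_correct_b)
{
	return (s32)(((s64)raw_ma * curr_correct_m) >> 12) + curr_correct_b;
}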
diff --git a/drivers/gpu/nvgpu/pmgr/pwrdev.h b/drivers/gpu/nvgpu/pmgr/pwrdev.h
new file mode 100644
index 00000000..b8592a18
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pwrdev.h
@@ -0,0 +1,51 @@
1/*
2 * general power device structures & definitions
3 *
4 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15#ifndef _PWRDEV_H_
16#define _PWRDEV_H_
17
18#include "boardobj/boardobj.h"
19#include "pmuif/gpmuifpmgr.h"
20#include "ctrl/ctrlpmgr.h"
21
22#define PWRDEV_I2CDEV_DEVICE_INDEX_NONE (0xFF)
23
24#define PWR_DEVICE_PROV_NUM_DEFAULT 1
25
26struct pwr_device {
27 struct boardobj super;
28 u8 power_rail;
29 u8 i2c_dev_idx;
30 bool bIs_inforom_config;
31 u32 power_corr_factor;
32};
33
34struct pwr_devices {
35 struct boardobjgrp_e32 super;
36};
37
38struct pwr_device_ina3221 {
39 struct pwr_device super;
40 struct ctrl_pmgr_pwr_device_info_rshunt
41 r_shuntm_ohm[NV_PMU_PMGR_PWR_DEVICE_INA3221_CH_NUM];
42 u16 configuration;
43 u16 mask_enable;
44 u8 gpio_function;
45 u16 curr_correct_m;
46 s16 curr_correct_b;
47} ;
48
49u32 pmgr_device_sw_setup(struct gk20a *g);
50
51#endif
diff --git a/drivers/gpu/nvgpu/pmgr/pwrmonitor.c b/drivers/gpu/nvgpu/pmgr/pwrmonitor.c
new file mode 100644
index 00000000..c28751fd
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pwrmonitor.c
@@ -0,0 +1,365 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#include "gk20a/gk20a.h"
15#include "pwrdev.h"
16#include "include/bios.h"
17#include "boardobj/boardobjgrp.h"
18#include "boardobj/boardobjgrp_e32.h"
19#include "pmuif/gpmuifboardobj.h"
20#include "pmuif/gpmuifpmgr.h"
21#include "gm206/bios_gm206.h"
22#include "gk20a/pmu_gk20a.h"
23
24static u32 _pwr_channel_pmudata_instget(struct gk20a *g,
25 struct nv_pmu_boardobjgrp *pmuboardobjgrp,
26 struct nv_pmu_boardobj **ppboardobjpmudata,
27 u8 idx)
28{
29 struct nv_pmu_pmgr_pwr_channel_desc *ppmgrchannel =
30 (struct nv_pmu_pmgr_pwr_channel_desc *)pmuboardobjgrp;
31
32 gk20a_dbg_info("");
33
34 /*check whether pmuboardobjgrp has a valid boardobj in index*/
35 if (((u32)BIT(idx) &
36 ppmgrchannel->hdr.data.super.obj_mask.super.data[0]) == 0)
37 return -EINVAL;
38
39 *ppboardobjpmudata = (struct nv_pmu_boardobj *)
40 &ppmgrchannel->channels[idx].data.board_obj;
41
42 /* handle Global/common data here as we need index */
43 ppmgrchannel->channels[idx].data.pwr_channel.ch_idx = idx;
44
45 gk20a_dbg_info(" Done");
46
47 return 0;
48}
49
50static u32 _pwr_channel_rels_pmudata_instget(struct gk20a *g,
51 struct nv_pmu_boardobjgrp *pmuboardobjgrp,
52 struct nv_pmu_boardobj **ppboardobjpmudata,
53 u8 idx)
54{
55 struct nv_pmu_pmgr_pwr_chrelationship_desc *ppmgrchrels =
56 (struct nv_pmu_pmgr_pwr_chrelationship_desc *)pmuboardobjgrp;
57
58 gk20a_dbg_info("");
59
60 /*check whether pmuboardobjgrp has a valid boardobj in index*/
61 if (((u32)BIT(idx) &
62 ppmgrchrels->hdr.data.super.obj_mask.super.data[0]) == 0)
63 return -EINVAL;
64
65 *ppboardobjpmudata = (struct nv_pmu_boardobj *)
66 &ppmgrchrels->ch_rels[idx].data.board_obj;
67
68 gk20a_dbg_info(" Done");
69
70 return 0;
71}
72
73static u32 _pwr_channel_state_init(struct gk20a *g)
74{
75 u8 indx = 0;
76 struct pwr_channel *pchannel;
77 u32 objmask =
78 g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super.objmask;
79
80 /* Initialize each PWR_CHANNEL's dependent channel mask */
81 BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK(32, indx, objmask) {
82 pchannel = PMGR_PWR_MONITOR_GET_PWR_CHANNEL(g, indx);
83 if (pchannel == NULL) {
84 gk20a_err(dev_from_gk20a(g),
85 "PMGR_PWR_MONITOR_GET_PWR_CHANNEL-failed %d", indx);
86 return -EINVAL;
87 }
88 pchannel->dependent_ch_mask =0;
89 }
90 BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK_END
91
92 return 0;
93}
94
95static bool _pwr_channel_implements(struct pwr_channel *pchannel,
96 u8 type)
97{
98 return (type == BOARDOBJ_GET_TYPE(pchannel));
99}
100
101static u32 _pwr_domains_pmudatainit_sensor(struct gk20a *g,
102 struct boardobj *board_obj_ptr,
103 struct nv_pmu_boardobj *ppmudata)
104{
105 struct nv_pmu_pmgr_pwr_channel_sensor *pmu_sensor_data;
106 struct pwr_channel_sensor *sensor;
107 u32 status = 0;
108
109 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata);
110 if (status) {
111 gk20a_err(dev_from_gk20a(g),
112 "error updating pmu boardobjgrp for pwr sensor 0x%x",
113 status);
114 goto done;
115 }
116
117 sensor = (struct pwr_channel_sensor *)board_obj_ptr;
118 pmu_sensor_data = (struct nv_pmu_pmgr_pwr_channel_sensor *) ppmudata;
119
120 pmu_sensor_data->super.pwr_rail = sensor->super.pwr_rail;
121 pmu_sensor_data->super.volt_fixedu_v = sensor->super.volt_fixed_uv;
122 pmu_sensor_data->super.pwr_corr_slope = sensor->super.pwr_corr_slope;
123 pmu_sensor_data->super.pwr_corr_offsetm_w = sensor->super.pwr_corr_offset_mw;
124 pmu_sensor_data->super.curr_corr_slope = sensor->super.curr_corr_slope;
125 pmu_sensor_data->super.curr_corr_offsetm_a = sensor->super.curr_corr_offset_ma;
126 pmu_sensor_data->super.dependent_ch_mask = sensor->super.dependent_ch_mask;
127 pmu_sensor_data->super.ch_idx = 0;
128
129 pmu_sensor_data->pwr_dev_idx = sensor->pwr_dev_idx;
130 pmu_sensor_data->pwr_dev_prov_idx = sensor->pwr_dev_prov_idx;
131
132done:
133 return status;
134}
135
136static struct boardobj *construct_pwr_topology(struct gk20a *g,
137 void *pargs, u16 pargs_size, u8 type)
138{
139 struct boardobj *board_obj_ptr = NULL;
140 u32 status;
141 struct pwr_channel_sensor *pwrchannel;
142 struct pwr_channel_sensor *sensor = (struct pwr_channel_sensor*)pargs;
143
144 status = boardobj_construct_super(g, &board_obj_ptr,
145 pargs_size, pargs);
146 if (status)
147 return NULL;
148
149 pwrchannel = (struct pwr_channel_sensor*)board_obj_ptr;
150
151 /* Set Super class interfaces */
152 board_obj_ptr->pmudatainit = _pwr_domains_pmudatainit_sensor;
153
154 pwrchannel->super.pwr_rail = sensor->super.pwr_rail;
155 pwrchannel->super.volt_fixed_uv = sensor->super.volt_fixed_uv;
156 pwrchannel->super.pwr_corr_slope = sensor->super.pwr_corr_slope;
157 pwrchannel->super.pwr_corr_offset_mw = sensor->super.pwr_corr_offset_mw;
158 pwrchannel->super.curr_corr_slope = sensor->super.curr_corr_slope;
159 pwrchannel->super.curr_corr_offset_ma = sensor->super.curr_corr_offset_ma;
160 pwrchannel->super.dependent_ch_mask = 0;
161
162 pwrchannel->pwr_dev_idx = sensor->pwr_dev_idx;
163 pwrchannel->pwr_dev_prov_idx = sensor->pwr_dev_prov_idx;
164
165 gk20a_dbg_info(" Done");
166
167 return board_obj_ptr;
168}
169
170static u32 devinit_get_pwr_topology_table(struct gk20a *g,
171 struct pmgr_pwr_monitor *ppwrmonitorobjs)
172{
173 u32 status = 0;
174 u8 *pwr_topology_table_ptr = NULL;
175 u8 *curr_pwr_topology_table_ptr = NULL;
176 struct boardobj *boardobj;
177 struct pwr_topology_2x_header pwr_topology_table_header = { 0 };
178 struct pwr_topology_2x_entry pwr_topology_table_entry = { 0 };
179 u32 index;
180 u32 obj_index = 0;
181 u16 pwr_topology_size;
182 union {
183 struct boardobj boardobj;
184 struct pwr_channel pwrchannel;
185 struct pwr_channel_sensor sensor;
186 } pwr_topology_data;
187
188 gk20a_dbg_info("");
189
190 if (g->ops.bios.get_perf_table_ptrs != NULL) {
191 pwr_topology_table_ptr = (u8 *)g->ops.bios.get_perf_table_ptrs(g,
192 g->bios.perf_token, POWER_TOPOLOGY_TABLE);
193 if (pwr_topology_table_ptr == NULL) {
194 status = -EINVAL;
195 goto done;
196 }
197 }
198
199 memcpy(&pwr_topology_table_header, pwr_topology_table_ptr,
200 VBIOS_POWER_TOPOLOGY_2X_HEADER_SIZE_06);
201
202 if (pwr_topology_table_header.version !=
203 VBIOS_POWER_TOPOLOGY_VERSION_2X) {
204 status = -EINVAL;
205 goto done;
206 }
207
208 g->pmgr_pmu.pmgr_monitorobjs.b_is_topology_tbl_ver_1x = false;
209
210 if (pwr_topology_table_header.header_size <
211 VBIOS_POWER_TOPOLOGY_2X_HEADER_SIZE_06) {
212 status = -EINVAL;
213 goto done;
214 }
215
216 if (pwr_topology_table_header.table_entry_size !=
217 VBIOS_POWER_TOPOLOGY_2X_ENTRY_SIZE_16) {
218 status = -EINVAL;
219 goto done;
220 }
221
222 curr_pwr_topology_table_ptr = (pwr_topology_table_ptr +
223 VBIOS_POWER_TOPOLOGY_2X_HEADER_SIZE_06);
224
225 for (index = 0; index < pwr_topology_table_header.num_table_entries;
226 index++) {
227 u8 class_type;
228
229 curr_pwr_topology_table_ptr += (pwr_topology_table_header.table_entry_size * index);
230
231 pwr_topology_table_entry.flags0 = *curr_pwr_topology_table_ptr;
232 pwr_topology_table_entry.pwr_rail = *(curr_pwr_topology_table_ptr + 1);
233
234 memcpy(&pwr_topology_table_entry.param0,
235 (curr_pwr_topology_table_ptr + 2),
236 (VBIOS_POWER_TOPOLOGY_2X_ENTRY_SIZE_16 - 2));
237
238 class_type = (u8)BIOS_GET_FIELD(
239 pwr_topology_table_entry.flags0,
240 NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_FLAGS0_CLASS);
241
242 if (class_type == NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_FLAGS0_CLASS_SENSOR) {
243 pwr_topology_data.sensor.pwr_dev_idx = (u8)BIOS_GET_FIELD(
244 pwr_topology_table_entry.param1,
245 NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_PARAM1_SENSOR_INDEX);
246 pwr_topology_data.sensor.pwr_dev_prov_idx = (u8)BIOS_GET_FIELD(
247 pwr_topology_table_entry.param1,
248 NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_PARAM1_SENSOR_PROVIDER_INDEX);
249
250 pwr_topology_size = sizeof(struct pwr_channel_sensor);
251 } else
252 continue;
253
254 /* Initialize data for the parent class */
255 pwr_topology_data.boardobj.type = CTRL_PMGR_PWR_CHANNEL_TYPE_SENSOR;
256 pwr_topology_data.pwrchannel.pwr_rail = (u8)pwr_topology_table_entry.pwr_rail;
257 pwr_topology_data.pwrchannel.volt_fixed_uv = pwr_topology_table_entry.param0;
258 pwr_topology_data.pwrchannel.pwr_corr_slope = (1 << 12);
259 pwr_topology_data.pwrchannel.pwr_corr_offset_mw = 0;
260 pwr_topology_data.pwrchannel.curr_corr_slope =
261 (u32)pwr_topology_table_entry.curr_corr_slope;
262 pwr_topology_data.pwrchannel.curr_corr_offset_ma =
263 (s32)pwr_topology_table_entry.curr_corr_offset;
264
265 boardobj = construct_pwr_topology(g, &pwr_topology_data,
266 pwr_topology_size, pwr_topology_data.boardobj.type);
267
268 if (!boardobj) {
269 gk20a_err(dev_from_gk20a(g),
270 "unable to create pwr topology for %d type %d",
271 index, pwr_topology_data.boardobj.type);
272 status = -EINVAL;
273 goto done;
274 }
275
276 status = boardobjgrp_objinsert(&ppwrmonitorobjs->pwr_channels.super,
277 boardobj, obj_index);
278
279 if (status) {
280 gk20a_err(dev_from_gk20a(g),
281 "unable to insert pwr topology boardobj for %d", index);
282 status = -EINVAL;
283 goto done;
284 }
285
286 ++obj_index;
287 }
288
289done:
290 gk20a_dbg_info(" done status %x", status);
291 return status;
292}
293
294u32 pmgr_monitor_sw_setup(struct gk20a *g)
295{
296 u32 status;
297 struct boardobjgrp *pboardobjgrp = NULL;
298 struct pwr_channel *pchannel;
299 struct pmgr_pwr_monitor *ppwrmonitorobjs;
300 u8 indx = 0;
301
302 /* Construct the Super Class and override the Interfaces */
303 status = boardobjgrpconstruct_e32(
304 &g->pmgr_pmu.pmgr_monitorobjs.pwr_channels);
305 if (status) {
306 gk20a_err(dev_from_gk20a(g),
307 "error creating boardobjgrp for pmgr channel, status - 0x%x",
308 status);
309 goto done;
310 }
311
312 pboardobjgrp = &(g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super);
313
314 /* Override the Interfaces */
315 pboardobjgrp->pmudatainstget = _pwr_channel_pmudata_instget;
316
317 /* Construct the Super Class and override the Interfaces */
318 status = boardobjgrpconstruct_e32(
319 &g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels);
320 if (status) {
321 gk20a_err(dev_from_gk20a(g),
322 "error creating boardobjgrp for pmgr channel relationship, status - 0x%x",
323 status);
324 goto done;
325 }
326
327 pboardobjgrp = &(g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super);
328
329 /* Override the Interfaces */
330 pboardobjgrp->pmudatainstget = _pwr_channel_rels_pmudata_instget;
331
332 /* Initialize the Total GPU Power Channel Mask to 0 */
333 g->pmgr_pmu.pmgr_monitorobjs.pmu_data.channels.hdr.data.total_gpu_power_channel_mask = 0;
334 g->pmgr_pmu.pmgr_monitorobjs.total_gpu_channel_idx =
335 CTRL_PMGR_PWR_CHANNEL_INDEX_INVALID;
336
337 /* Supported topology table version 1.0 */
338 g->pmgr_pmu.pmgr_monitorobjs.b_is_topology_tbl_ver_1x = true;
339
340 ppwrmonitorobjs = &(g->pmgr_pmu.pmgr_monitorobjs);
341
342 status = devinit_get_pwr_topology_table(g, ppwrmonitorobjs);
343 if (status)
344 goto done;
345
346 status = _pwr_channel_state_init(g);
347 if (status)
348 goto done;
349
350 /* Initialise physicalChannelMask */
351 g->pmgr_pmu.pmgr_monitorobjs.physical_channel_mask = 0;
352
353 pboardobjgrp = &g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super;
354
355 BOARDOBJGRP_FOR_EACH(pboardobjgrp, struct pwr_channel *, pchannel, indx) {
356 if (_pwr_channel_implements(pchannel,
357 CTRL_PMGR_PWR_CHANNEL_TYPE_SENSOR)) {
358 g->pmgr_pmu.pmgr_monitorobjs.physical_channel_mask |= BIT(indx);
359 }
360 }
361
362done:
363 gk20a_dbg_info(" done status %x", status);
364 return status;
365}
diff --git a/drivers/gpu/nvgpu/pmgr/pwrmonitor.h b/drivers/gpu/nvgpu/pmgr/pwrmonitor.h
new file mode 100644
index 00000000..7cd6b8c9
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pwrmonitor.h
@@ -0,0 +1,60 @@
1/*
2 * general power channel structures & definitions
3 *
4 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15#ifndef _PWRMONITOR_H_
16#define _PWRMONITOR_H_
17
18#include "boardobj/boardobjgrp.h"
19#include "boardobj/boardobj.h"
20#include "pmuif/gpmuifpmgr.h"
21#include "ctrl/ctrlpmgr.h"
22
23struct pwr_channel {
24 struct boardobj super;
25 u8 pwr_rail;
26 u32 volt_fixed_uv;
27 u32 pwr_corr_slope;
28 s32 pwr_corr_offset_mw;
29 u32 curr_corr_slope;
30 s32 curr_corr_offset_ma;
31 u32 dependent_ch_mask;
32};
33
34struct pwr_chrelationship {
35 struct boardobj super;
36 u8 chIdx;
37};
38
39struct pwr_channel_sensor {
40 struct pwr_channel super;
41 u8 pwr_dev_idx;
42 u8 pwr_dev_prov_idx;
43};
44
45struct pmgr_pwr_monitor {
46 bool b_is_topology_tbl_ver_1x;
47 struct boardobjgrp_e32 pwr_channels;
48 struct boardobjgrp_e32 pwr_ch_rels;
49 u8 total_gpu_channel_idx;
50 u32 physical_channel_mask;
51 struct nv_pmu_pmgr_pwr_monitor_pack pmu_data;
52};
53
54#define PMGR_PWR_MONITOR_GET_PWR_CHANNEL(g, channel_idx) \
55 ((struct pwr_channel *)BOARDOBJGRP_OBJ_GET_BY_IDX( \
56 &(g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super), (channel_idx)))
57
58u32 pmgr_monitor_sw_setup(struct gk20a *g);
59
60#endif
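
PMGR_PWR_MONITOR_GET_PWR_CHANNEL() above resolves a channel index to a pwr_channel pointer through BOARDOBJGRP_OBJ_GET_BY_IDX(). Below is a hedged, self-contained sketch of such a by-index getter that returns NULL for an unpopulated slot; channel_group, pwr_channel_sketch and MAX_CHANNELS are invented for the example and are not the boardobjgrp implementation.

/* Illustrative by-index getter in the spirit of BOARDOBJGRP_OBJ_GET_BY_IDX();
 * not the actual nvgpu boardobjgrp implementation. */
#include <stddef.h>

#define MAX_CHANNELS 32

struct pwr_channel_sketch {
	unsigned char pwr_rail;
};

struct channel_group {
	unsigned int valid_mask;	/* bit i set => slot i populated */
	struct pwr_channel_sketch *objects[MAX_CHANNELS];
};

static struct pwr_channel_sketch *channel_get_by_idx(struct channel_group *grp,
						     unsigned int idx)
{
	if (idx >= MAX_CHANNELS || !(grp->valid_mask & (1U << idx)))
		return NULL;	/* caller must tolerate a missing channel */

	return grp->objects[idx];
}

A caller would check the returned pointer for NULL before dereferencing, just as users of PMGR_PWR_MONITOR_GET_PWR_CHANNEL() are expected to.
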
diff --git a/drivers/gpu/nvgpu/pmgr/pwrpolicy.c b/drivers/gpu/nvgpu/pmgr/pwrpolicy.c
new file mode 100644
index 00000000..bec13b0c
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pwrpolicy.c
@@ -0,0 +1,680 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#include "gk20a/gk20a.h"
15#include "pwrpolicy.h"
16#include "include/bios.h"
17#include "boardobj/boardobjgrp.h"
18#include "boardobj/boardobjgrp_e32.h"
19#include "pmuif/gpmuifboardobj.h"
20#include "pmuif/gpmuifpmgr.h"
21#include "gm206/bios_gm206.h"
22#include "gk20a/pmu_gk20a.h"
23
24#define _pwr_policy_limitarboutputget_helper(p_limit_arb) (p_limit_arb)->output
25#define _pwr_policy_limitdeltaapply(limit, delta) ((u32)max(((s32)limit) + (delta), 0))
26
27static u32 _pwr_policy_limitarbinputset_helper(struct gk20a *g,
28 struct ctrl_pmgr_pwr_policy_limit_arbitration *p_limit_arb,
29 u8 client_idx,
30 u32 limit_value)
31{
32 u8 indx;
33 bool b_found = false;
34 u32 status = 0;
35 u32 output = limit_value;
36
37 for (indx = 0; indx < p_limit_arb->num_inputs; indx++) {
38 if (p_limit_arb->inputs[indx].pwr_policy_idx == client_idx) {
39 p_limit_arb->inputs[indx].limit_value = limit_value;
40 b_found = true;
41 } else if (p_limit_arb->b_arb_max) {
42 output = max(output, p_limit_arb->inputs[indx].limit_value);
43 } else {
44 output = min(output, p_limit_arb->inputs[indx].limit_value);
45 }
46 }
47
48 if (!b_found) {
49 if (p_limit_arb->num_inputs <
50 CTRL_PMGR_PWR_POLICY_MAX_LIMIT_INPUTS) {
51 p_limit_arb->inputs[
52 p_limit_arb->num_inputs].pwr_policy_idx = client_idx;
53 p_limit_arb->inputs[
54 p_limit_arb->num_inputs].limit_value = limit_value;
55 p_limit_arb->num_inputs++;
56 } else {
57 gk20a_err(g->dev, "No entries remaining for clientIdx=%d",
58 client_idx);
59 status = -EINVAL;
60 }
61 }
62
63 if (!status) {
64 p_limit_arb->output = output;
65 }
66
67 return status;
68}
69
70static u32 _pwr_policy_limitid_translate(struct gk20a *g,
71 struct pwr_policy *ppolicy,
72 enum pwr_policy_limit_id limit_id,
73 struct ctrl_pmgr_pwr_policy_limit_arbitration **p_limit_arb,
74 struct ctrl_pmgr_pwr_policy_limit_arbitration **p_limit_arb_sec)
75{
76 u32 status = 0;
77
78 switch (limit_id) {
79 case PWR_POLICY_LIMIT_ID_MIN:
80 *p_limit_arb = &ppolicy->limit_arb_min;
81 break;
82
83 case PWR_POLICY_LIMIT_ID_RATED:
84 *p_limit_arb = &ppolicy->limit_arb_rated;
85
86 if (p_limit_arb_sec != NULL) {
87 *p_limit_arb_sec = &ppolicy->limit_arb_curr;
88 }
89 break;
90
91 case PWR_POLICY_LIMIT_ID_MAX:
92 *p_limit_arb = &ppolicy->limit_arb_max;
93 break;
94
95 case PWR_POLICY_LIMIT_ID_CURR:
96 *p_limit_arb = &ppolicy->limit_arb_curr;
97 break;
98
99 case PWR_POLICY_LIMIT_ID_BATT:
100 *p_limit_arb = &ppolicy->limit_arb_batt;
101 break;
102
103 default:
104 gk20a_err(g->dev, "Unsupported limitId=%d",
105 limit_id);
106 status = -EINVAL;
107 break;
108 }
109
110 return status;
111}
112
113static u32 _pwr_policy_limitarbinputset(struct gk20a *g,
114 struct pwr_policy *ppolicy,
115 enum pwr_policy_limit_id limit_id,
116 u8 client_idx,
117 u32 limit)
118{
119 u32 status = 0;
120 struct ctrl_pmgr_pwr_policy_limit_arbitration *p_limit_arb = NULL;
121 struct ctrl_pmgr_pwr_policy_limit_arbitration *p_limit_arb_sec = NULL;
122
123 status = _pwr_policy_limitid_translate(g,
124 ppolicy,
125 limit_id,
126 &p_limit_arb,
127 &p_limit_arb_sec);
128 if (status) {
129 goto exit;
130 }
131
132 status = _pwr_policy_limitarbinputset_helper(g, p_limit_arb, client_idx, limit);
133 if (status) {
134 gk20a_err(g->dev,
135 "Error setting client limit value: status=0x%08x, limitId=0x%x, clientIdx=0x%x, limit=%d",
136 status, limit_id, client_idx, limit);
137 goto exit;
138 }
139
140 if (NULL != p_limit_arb_sec) {
141 status = _pwr_policy_limitarbinputset_helper(g, p_limit_arb_sec,
142 CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM,
143 _pwr_policy_limitarboutputget_helper(p_limit_arb));
144 }
145
146exit:
147 return status;
148}
149
150static inline void _pwr_policy_limitarbconstruct(
151 struct ctrl_pmgr_pwr_policy_limit_arbitration *p_limit_arb,
152 bool b_arb_max)
153{
154 p_limit_arb->num_inputs = 0;
155 p_limit_arb->b_arb_max = b_arb_max;
156}
157
158static u32 _pwr_policy_limitarboutputget(struct gk20a *g,
159 struct pwr_policy *ppolicy,
160 enum pwr_policy_limit_id limit_id)
161{
162 u32 status = 0;
163 struct ctrl_pmgr_pwr_policy_limit_arbitration *p_limit_arb = NULL;
164
165 status = _pwr_policy_limitid_translate(g,
166 ppolicy,
167 limit_id,
168 &p_limit_arb,
169 NULL);
170 if (status) {
171 return 0;
172 }
173
174 return _pwr_policy_limitarboutputget_helper(p_limit_arb);
175}
176
177static u32 _pwr_domains_pmudatainit_hw_threshold(struct gk20a *g,
178 struct boardobj *board_obj_ptr,
179 struct nv_pmu_boardobj *ppmudata)
180{
181 struct nv_pmu_pmgr_pwr_policy_hw_threshold *pmu_hw_threshold_data;
182 struct pwr_policy_hw_threshold *p_hw_threshold;
183 struct pwr_policy *p_pwr_policy;
184 struct nv_pmu_pmgr_pwr_policy *pmu_pwr_policy;
185 u32 status = 0;
186
187 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata);
188 if (status) {
189 gk20a_err(dev_from_gk20a(g),
190 "error updating pmu boardobjgrp for pwr policy 0x%x",
191 status);
192 status = -ENOMEM;
193 goto done;
194 }
195
196 p_hw_threshold = (struct pwr_policy_hw_threshold *)board_obj_ptr;
197 pmu_hw_threshold_data = (struct nv_pmu_pmgr_pwr_policy_hw_threshold *) ppmudata;
198 pmu_pwr_policy = (struct nv_pmu_pmgr_pwr_policy *) ppmudata;
199 p_pwr_policy = (struct pwr_policy *)&(p_hw_threshold->super.super);
200
201 pmu_pwr_policy->ch_idx = 0;
202 pmu_pwr_policy->limit_unit = p_pwr_policy->limit_unit;
203 pmu_pwr_policy->num_limit_inputs = p_pwr_policy->num_limit_inputs;
204
205 pmu_pwr_policy->limit_min = _pwr_policy_limitdeltaapply(
206 _pwr_policy_limitarboutputget(g, p_pwr_policy,
207 PWR_POLICY_LIMIT_ID_MIN),
208 p_pwr_policy->limit_delta);
209
210 pmu_pwr_policy->limit_max = _pwr_policy_limitdeltaapply(
211 _pwr_policy_limitarboutputget(g, p_pwr_policy,
212 PWR_POLICY_LIMIT_ID_MAX),
213 p_pwr_policy->limit_delta);
214
215 pmu_pwr_policy->limit_curr = _pwr_policy_limitdeltaapply(
216 _pwr_policy_limitarboutputget(g, p_pwr_policy,
217 PWR_POLICY_LIMIT_ID_CURR),
218 p_pwr_policy->limit_delta);
219
220 memcpy(&pmu_pwr_policy->integral, &p_pwr_policy->integral,
221 sizeof(struct ctrl_pmgr_pwr_policy_info_integral));
222
223 pmu_pwr_policy->sample_mult = p_pwr_policy->sample_mult;
224 pmu_pwr_policy->filter_type = p_pwr_policy->filter_type;
225 pmu_pwr_policy->filter_param = p_pwr_policy->filter_param;
226
227 pmu_hw_threshold_data->threshold_idx = p_hw_threshold->threshold_idx;
228 pmu_hw_threshold_data->low_threshold_idx = p_hw_threshold->low_threshold_idx;
229 pmu_hw_threshold_data->b_use_low_threshold = p_hw_threshold->b_use_low_threshold;
230 pmu_hw_threshold_data->low_threshold_value = p_hw_threshold->low_threshold_value;
231
232done:
233 return status;
234}
235
236static struct boardobj *construct_pwr_policy(struct gk20a *g,
237 void *pargs, u16 pargs_size, u8 type)
238{
239 struct boardobj *board_obj_ptr = NULL;
240 u32 status;
241 struct pwr_policy_hw_threshold *pwrpolicyhwthreshold;
242 struct pwr_policy *pwrpolicy;
243 struct pwr_policy *pwrpolicyparams = (struct pwr_policy*)pargs;
244 struct pwr_policy_hw_threshold *hwthreshold = (struct pwr_policy_hw_threshold*)pargs;
245
246 status = boardobj_construct_super(g, &board_obj_ptr,
247 pargs_size, pargs);
248 if (status)
249 return NULL;
250
251 pwrpolicyhwthreshold = (struct pwr_policy_hw_threshold*)board_obj_ptr;
252 pwrpolicy = (struct pwr_policy *)board_obj_ptr;
253
254 /* Set Super class interfaces */
255 board_obj_ptr->pmudatainit = _pwr_domains_pmudatainit_hw_threshold;
256
257 pwrpolicy->ch_idx = pwrpolicyparams->ch_idx;
258 pwrpolicy->num_limit_inputs = 0;
259 pwrpolicy->limit_unit = pwrpolicyparams->limit_unit;
260 pwrpolicy->filter_type = (enum ctrl_pmgr_pwr_policy_filter_type)(pwrpolicyparams->filter_type);
261 pwrpolicy->sample_mult = pwrpolicyparams->sample_mult;
262 switch (pwrpolicy->filter_type)
263 {
264 case CTRL_PMGR_PWR_POLICY_FILTER_TYPE_NONE:
265 break;
266
267 case CTRL_PMGR_PWR_POLICY_FILTER_TYPE_BLOCK:
268 pwrpolicy->filter_param.block.block_size =
269 pwrpolicyparams->filter_param.block.block_size;
270 break;
271
272 case CTRL_PMGR_PWR_POLICY_FILTER_TYPE_MOVING_AVERAGE:
273 pwrpolicy->filter_param.moving_avg.window_size =
274 pwrpolicyparams->filter_param.moving_avg.window_size;
275 break;
276
277 case CTRL_PMGR_PWR_POLICY_FILTER_TYPE_IIR:
278 pwrpolicy->filter_param.iir.divisor = pwrpolicyparams->filter_param.iir.divisor;
279 break;
280
281 default:
282 gk20a_err(g->dev,
283 "Error: unrecognized Power Policy filter type: %d.\n",
284 pwrpolicy->filter_type);
285 }
286
287 _pwr_policy_limitarbconstruct(&pwrpolicy->limit_arb_curr, false);
288
289 pwrpolicy->limit_delta = 0;
290
291 _pwr_policy_limitarbconstruct(&pwrpolicy->limit_arb_min, true);
292 status = _pwr_policy_limitarbinputset(g,
293 pwrpolicy,
294 PWR_POLICY_LIMIT_ID_MIN,
295 CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM,
296 pwrpolicyparams->limit_min);
297
298 _pwr_policy_limitarbconstruct(&pwrpolicy->limit_arb_max, false);
299 status = _pwr_policy_limitarbinputset(g,
300 pwrpolicy,
301 PWR_POLICY_LIMIT_ID_MAX,
302 CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM,
303 pwrpolicyparams->limit_max);
304
305 _pwr_policy_limitarbconstruct(&pwrpolicy->limit_arb_rated, false);
306 status = _pwr_policy_limitarbinputset(g,
307 pwrpolicy,
308 PWR_POLICY_LIMIT_ID_RATED,
309 CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM,
310 pwrpolicyparams->limit_rated);
311
312 _pwr_policy_limitarbconstruct(&pwrpolicy->limit_arb_batt, false);
313 status = _pwr_policy_limitarbinputset(g,
314 pwrpolicy,
315 PWR_POLICY_LIMIT_ID_BATT,
316 CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM,
317 ((pwrpolicyparams->limit_batt != 0) ?
318 pwrpolicyparams->limit_batt:
319 CTRL_PMGR_PWR_POLICY_LIMIT_MAX));
320
321 memcpy(&pwrpolicy->integral, &pwrpolicyparams->integral,
322 sizeof(struct ctrl_pmgr_pwr_policy_info_integral));
323
324 pwrpolicyhwthreshold->threshold_idx = hwthreshold->threshold_idx;
325 pwrpolicyhwthreshold->b_use_low_threshold = hwthreshold->b_use_low_threshold;
326 pwrpolicyhwthreshold->low_threshold_idx = hwthreshold->low_threshold_idx;
327 pwrpolicyhwthreshold->low_threshold_value = hwthreshold->low_threshold_value;
328
329 gk20a_dbg_info(" Done");
330
331 return board_obj_ptr;
332}
333
334static u32 _pwr_policy_construct_WAR_policy(struct gk20a *g,
335 struct pmgr_pwr_policy *ppwrpolicyobjs,
336 union pwr_policy_data_union *ppwrpolicydata,
337 u16 pwr_policy_size,
338 u32 hw_threshold_policy_index,
339 u32 obj_index)
340{
341 u32 status = 0;
342 struct boardobj *boardobj;
343
344 if (!(hw_threshold_policy_index & 0x1)) {
345 /* CRIT policy */
346 ppwrpolicydata->pwrpolicy.limit_min = 1000;
347 ppwrpolicydata->pwrpolicy.limit_rated = 20000;
348 ppwrpolicydata->pwrpolicy.limit_max = 20000;
349 ppwrpolicydata->hw_threshold.threshold_idx = 0;
350 } else {
351 /* WARN policy */
352 ppwrpolicydata->pwrpolicy.limit_min = 1000;
353 ppwrpolicydata->pwrpolicy.limit_rated = 11600;
354 ppwrpolicydata->pwrpolicy.limit_max = 11600;
355 ppwrpolicydata->hw_threshold.threshold_idx = 1;
356 }
357
358 boardobj = construct_pwr_policy(g, ppwrpolicydata,
359 pwr_policy_size, ppwrpolicydata->boardobj.type);
360
361 if (!boardobj) {
362 gk20a_err(dev_from_gk20a(g),
363 "unable to create pwr policy for type %d", ppwrpolicydata->boardobj.type);
364 status = -EINVAL;
365 goto done;
366 }
367
368 status = boardobjgrp_objinsert(&ppwrpolicyobjs->pwr_policies.super,
369 boardobj, obj_index);
370
371 if (status) {
372 gk20a_err(dev_from_gk20a(g),
373 "unable to insert pwr policy boardobj for %d", obj_index);
374 status = -EINVAL;
375 goto done;
376 }
377done:
378 return status;
379}
380
381static u32 devinit_get_pwr_policy_table(struct gk20a *g,
382 struct pmgr_pwr_policy *ppwrpolicyobjs)
383{
384 u32 status = 0;
385 u8 *pwr_policy_table_ptr = NULL;
386 u8 *curr_pwr_policy_table_ptr = NULL;
387 struct boardobj *boardobj;
388 struct pwr_policy_3x_header_struct pwr_policy_table_header = { 0 };
389 struct pwr_policy_3x_entry_struct pwr_policy_table_entry = { 0 };
390 u32 index;
391 u32 obj_index = 0;
392 u16 pwr_policy_size;
393 bool integral_control = false;
394 u32 hw_threshold_policy_index = 0;
395 union pwr_policy_data_union pwr_policy_data;
396
397 gk20a_dbg_info("");
398
399 if (g->ops.bios.get_perf_table_ptrs != NULL) {
400 pwr_policy_table_ptr = (u8 *)g->ops.bios.get_perf_table_ptrs(g,
401 g->bios.perf_token, POWER_CAPPING_TABLE);
402 if (pwr_policy_table_ptr == NULL) {
403 status = -EINVAL;
404 goto done;
405 }
406 }
407
408 memcpy(&pwr_policy_table_header.version,
409 (pwr_policy_table_ptr),
410 14);
411
412 memcpy(&pwr_policy_table_header.d2_limit,
413 (pwr_policy_table_ptr + 14),
414 (VBIOS_POWER_POLICY_3X_ENTRY_SIZE_2E - 14));
415
416 if (pwr_policy_table_header.version !=
417 VBIOS_POWER_POLICY_VERSION_3X) {
418 status = -EINVAL;
419 goto done;
420 }
421
422 if (pwr_policy_table_header.header_size <
423 VBIOS_POWER_POLICY_3X_HEADER_SIZE_25) {
424 status = -EINVAL;
425 goto done;
426 }
427
428 if (pwr_policy_table_header.table_entry_size !=
429 VBIOS_POWER_POLICY_3X_ENTRY_SIZE_2E) {
430 status = -EINVAL;
431 goto done;
432 }
433
434 curr_pwr_policy_table_ptr = (pwr_policy_table_ptr +
435 VBIOS_POWER_POLICY_3X_HEADER_SIZE_25);
436
437 for (index = 0; index < pwr_policy_table_header.num_table_entries;
438 index++) {
439 u8 class_type;
440
441 curr_pwr_policy_table_ptr = pwr_policy_table_ptr + VBIOS_POWER_POLICY_3X_HEADER_SIZE_25 + (pwr_policy_table_header.table_entry_size * index);
442
443 pwr_policy_table_entry.flags0 = *curr_pwr_policy_table_ptr;
444 pwr_policy_table_entry.ch_idx = *(curr_pwr_policy_table_ptr + 1);
445
446 memcpy(&pwr_policy_table_entry.limit_min,
447 (curr_pwr_policy_table_ptr + 2),
448 35);
449
450 memcpy(&pwr_policy_table_entry.ratio_min,
451 (curr_pwr_policy_table_ptr + 2 + 35),
452 4);
453
454 pwr_policy_table_entry.sample_mult =
455 *(curr_pwr_policy_table_ptr + 2 + 35 + 4);
456
457 memcpy(&pwr_policy_table_entry.filter_param,
458 (curr_pwr_policy_table_ptr + 2 + 35 + 4 + 1),
459 4);
460
461 class_type = (u8)BIOS_GET_FIELD(
462 pwr_policy_table_entry.flags0,
463 NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_CLASS);
464
465 if (class_type == NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_CLASS_HW_THRESHOLD) {
466 ppwrpolicyobjs->version = CTRL_PMGR_PWR_POLICY_TABLE_VERSION_3X;
467 ppwrpolicyobjs->base_sample_period = (u16)
468 pwr_policy_table_header.base_sample_period;
469 ppwrpolicyobjs->min_client_sample_period = (u16)
470 pwr_policy_table_header.min_client_sample_period;
471 ppwrpolicyobjs->low_sampling_mult =
472 pwr_policy_table_header.low_sampling_mult;
473
474 ppwrpolicyobjs->policy_idxs[1] =
475 (u8)pwr_policy_table_header.tgp_policy_idx;
476 ppwrpolicyobjs->policy_idxs[0] =
477 (u8)pwr_policy_table_header.rtp_policy_idx;
478 ppwrpolicyobjs->policy_idxs[2] =
479 pwr_policy_table_header.mxm_policy_idx;
480 ppwrpolicyobjs->policy_idxs[3] =
481 pwr_policy_table_header.dnotifier_policy_idx;
482 ppwrpolicyobjs->ext_limits[0].limit =
483 pwr_policy_table_header.d2_limit;
484 ppwrpolicyobjs->ext_limits[1].limit =
485 pwr_policy_table_header.d3_limit;
486 ppwrpolicyobjs->ext_limits[2].limit =
487 pwr_policy_table_header.d4_limit;
488 ppwrpolicyobjs->ext_limits[3].limit =
489 pwr_policy_table_header.d5_limit;
490 ppwrpolicyobjs->policy_idxs[4] =
491 pwr_policy_table_header.pwr_tgt_policy_idx;
492 ppwrpolicyobjs->policy_idxs[5] =
493 pwr_policy_table_header.pwr_tgt_floor_policy_idx;
494 ppwrpolicyobjs->policy_idxs[6] =
495 pwr_policy_table_header.sm_bus_policy_idx;
496
497 integral_control = (bool)BIOS_GET_FIELD(
498 pwr_policy_table_entry.flags1,
499 NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS1_INTEGRAL_CONTROL);
500
501 if (integral_control == 0x01) {
502 pwr_policy_data.pwrpolicy.integral.past_sample_count = (u8)
503 pwr_policy_table_entry.past_length;
504 pwr_policy_data.pwrpolicy.integral.next_sample_count = (u8)
505 pwr_policy_table_entry.next_length;
506 pwr_policy_data.pwrpolicy.integral.ratio_limit_max = (u16)
507 pwr_policy_table_entry.ratio_max;
508 pwr_policy_data.pwrpolicy.integral.ratio_limit_min = (u16)
509 pwr_policy_table_entry.ratio_min;
510 } else {
511 memset(&(pwr_policy_data.pwrpolicy.integral), 0x0,
512 sizeof(struct ctrl_pmgr_pwr_policy_info_integral));
513 }
514 pwr_policy_data.hw_threshold.threshold_idx = (u8)
515 BIOS_GET_FIELD(
516 pwr_policy_table_entry.param0,
517 NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_THRES_IDX);
518
519 pwr_policy_data.hw_threshold.b_use_low_threshold =
520 BIOS_GET_FIELD(
521 pwr_policy_table_entry.param0,
522 NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_LOW_THRESHOLD_USE);
523
524 if (pwr_policy_data.hw_threshold.b_use_low_threshold) {
525 pwr_policy_data.hw_threshold.low_threshold_idx = (u8)
526 BIOS_GET_FIELD(
527 pwr_policy_table_entry.param0,
528 NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_LOW_THRESHOLD_IDX);
529
530 pwr_policy_data.hw_threshold.low_threshold_value = (u16)
531 BIOS_GET_FIELD(
532 pwr_policy_table_entry.param1,
533 NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM1_HW_THRESHOLD_LOW_THRESHOLD_VAL);
534 }
535
536 pwr_policy_size = sizeof(struct pwr_policy_hw_threshold);
537 } else
538 continue;
539
540 /* Initialize data for the parent class */
541 pwr_policy_data.boardobj.type = CTRL_PMGR_PWR_POLICY_TYPE_HW_THRESHOLD;
542 pwr_policy_data.pwrpolicy.ch_idx = (u8)pwr_policy_table_entry.ch_idx;
543 pwr_policy_data.pwrpolicy.limit_unit = (u8)
544 BIOS_GET_FIELD(
545 pwr_policy_table_entry.flags0,
546 NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_LIMIT_UNIT);
547 pwr_policy_data.pwrpolicy.filter_type = (u8)
548 BIOS_GET_FIELD(
549 pwr_policy_table_entry.flags1,
550 NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS1_FILTER_TYPE);
551 pwr_policy_data.pwrpolicy.limit_min = pwr_policy_table_entry.limit_min;
552 pwr_policy_data.pwrpolicy.limit_rated = pwr_policy_table_entry.limit_rated;
553 pwr_policy_data.pwrpolicy.limit_max = pwr_policy_table_entry.limit_max;
554 pwr_policy_data.pwrpolicy.limit_batt = pwr_policy_table_entry.limit_batt;
555
556 pwr_policy_data.pwrpolicy.sample_mult = (u8)pwr_policy_table_entry.sample_mult;
557
558 /* Fill the entry's filterParam value into filter_param */
559 pwr_policy_data.pwrpolicy.filter_param.block.block_size = 0;
560 pwr_policy_data.pwrpolicy.filter_param.moving_avg.window_size = 0;
561 pwr_policy_data.pwrpolicy.filter_param.iir.divisor = 0;
562
563 hw_threshold_policy_index |=
564 BIT(pwr_policy_data.hw_threshold.threshold_idx);
565
566 boardobj = construct_pwr_policy(g, &pwr_policy_data,
567 pwr_policy_size, pwr_policy_data.boardobj.type);
568
569 if (!boardobj) {
570 gk20a_err(dev_from_gk20a(g),
571 "unable to create pwr policy for %d type %d", index, pwr_policy_data.boardobj.type);
572 status = -EINVAL;
573 goto done;
574 }
575
576 status = boardobjgrp_objinsert(&ppwrpolicyobjs->pwr_policies.super,
577 boardobj, obj_index);
578
579 if (status) {
580 gk20a_err(dev_from_gk20a(g),
581 "unable to insert pwr policy boardobj for %d", index);
582 status = -EINVAL;
583 goto done;
584 }
585
586 ++obj_index;
587 }
588
589 if (hw_threshold_policy_index &&
590 (hw_threshold_policy_index < 0x3)) {
591 status = _pwr_policy_construct_WAR_policy(g,
592 ppwrpolicyobjs,
593 &pwr_policy_data,
594 pwr_policy_size,
595 hw_threshold_policy_index,
596 obj_index);
597 if (status) {
598 gk20a_err(dev_from_gk20a(g),
599 "unable to construct_WAR_policy");
600 status = -EINVAL;
601 goto done;
602 }
603 ++obj_index;
604 }
605
606done:
607 gk20a_dbg_info(" done status %x", status);
608 return status;
609}
610
611u32 pmgr_policy_sw_setup(struct gk20a *g)
612{
613 u32 status;
614 struct boardobjgrp *pboardobjgrp = NULL;
615 struct pwr_policy *ppolicy;
616 struct pmgr_pwr_policy *ppwrpolicyobjs;
617 u8 indx = 0;
618
619 /* Construct the Super Class and override the Interfaces */
620 status = boardobjgrpconstruct_e32(
621 &g->pmgr_pmu.pmgr_policyobjs.pwr_policies);
622 if (status) {
623 gk20a_err(dev_from_gk20a(g),
624 "error creating boardobjgrp for pmgr policy, status - 0x%x",
625 status);
626 goto done;
627 }
628
629 status = boardobjgrpconstruct_e32(
630 &g->pmgr_pmu.pmgr_policyobjs.pwr_policy_rels);
631 if (status) {
632 gk20a_err(dev_from_gk20a(g),
633 "error creating boardobjgrp for pmgr policy rels, status - 0x%x",
634 status);
635 goto done;
636 }
637
638 status = boardobjgrpconstruct_e32(
639 &g->pmgr_pmu.pmgr_policyobjs.pwr_violations);
640 if (status) {
641 gk20a_err(dev_from_gk20a(g),
642 "error creating boardobjgrp for pmgr violations, status - 0x%x",
643 status);
644 goto done;
645 }
646
647 memset(g->pmgr_pmu.pmgr_policyobjs.policy_idxs, CTRL_PMGR_PWR_POLICY_INDEX_INVALID,
648 sizeof(u8) * CTRL_PMGR_PWR_POLICY_IDX_NUM_INDEXES);
649
650 /* Initialize external power limit policy indexes to _INVALID/0xFF */
651 for (indx = 0; indx < PWR_POLICY_EXT_POWER_STATE_ID_COUNT; indx++) {
652 g->pmgr_pmu.pmgr_policyobjs.ext_limits[indx].policy_table_idx =
653 CTRL_PMGR_PWR_POLICY_INDEX_INVALID;
654 }
655
656 /* Initialize external power state to _D1 */
657 g->pmgr_pmu.pmgr_policyobjs.ext_power_state = 0xFFFFFFFF;
658
659 ppwrpolicyobjs = &(g->pmgr_pmu.pmgr_policyobjs);
660 pboardobjgrp = &(g->pmgr_pmu.pmgr_policyobjs.pwr_policies.super);
661
662 status = devinit_get_pwr_policy_table(g, ppwrpolicyobjs);
663 if (status)
664 goto done;
665
666 g->pmgr_pmu.pmgr_policyobjs.b_enabled = true;
667
668 BOARDOBJGRP_FOR_EACH(pboardobjgrp, struct pwr_policy *, ppolicy, indx) {
669 PMGR_PWR_POLICY_INCREMENT_LIMIT_INPUT_COUNT(ppolicy);
670 }
671
672 g->pmgr_pmu.pmgr_policyobjs.global_ceiling.values[0] =
673 0xFF;
674
675 g->pmgr_pmu.pmgr_policyobjs.client_work_item.b_pending = false;
676
677done:
678 gk20a_dbg_info(" done status %x", status);
679 return status;
680}
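
The arbitration helpers near the top of pwrpolicy.c reduce all client limit requests to a single output: the largest request when b_arb_max is set, otherwise the smallest. A small self-contained sketch of that reduction is shown below; arbitrate_limit() and the plain request array are illustrative substitutes for the ctrl_pmgr_pwr_policy_limit_arbitration bookkeeping.

/* Standalone sketch of limit arbitration: collapse all client requests to
 * either the maximum or the minimum, as the helper does over its inputs. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t arbitrate_limit(const uint32_t *requests, unsigned int n,
				bool arb_max)
{
	uint32_t out = requests[0];
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (arb_max)
			out = requests[i] > out ? requests[i] : out;
		else
			out = requests[i] < out ? requests[i] : out;
	}

	return out;
}

int main(void)
{
	uint32_t req[] = { 20000, 11600, 15000 };	/* example limits in mW */

	printf("min-arbitrated limit: %u mW\n",
	       (unsigned int)arbitrate_limit(req, 3, false));
	printf("max-arbitrated limit: %u mW\n",
	       (unsigned int)arbitrate_limit(req, 3, true));
	return 0;
}
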
diff --git a/drivers/gpu/nvgpu/pmgr/pwrpolicy.h b/drivers/gpu/nvgpu/pmgr/pwrpolicy.h
new file mode 100644
index 00000000..82289137
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pwrpolicy.h
@@ -0,0 +1,117 @@
1/*
2 * general power channel structures & definitions
3 *
4 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15#ifndef _PWRPOLICY_H_
16#define _PWRPOLICY_H_
17
18#include "boardobj/boardobjgrp.h"
19#include "boardobj/boardobj.h"
20#include "pmuif/gpmuifpmgr.h"
21#include "ctrl/ctrlpmgr.h"
22
23#define PWR_POLICY_EXT_POWER_STATE_ID_COUNT 0x4
24
25enum pwr_policy_limit_id {
26 PWR_POLICY_LIMIT_ID_MIN = 0x00000000,
27 PWR_POLICY_LIMIT_ID_RATED,
28 PWR_POLICY_LIMIT_ID_MAX,
29 PWR_POLICY_LIMIT_ID_CURR,
30 PWR_POLICY_LIMIT_ID_BATT,
31};
32
33struct pwr_policy {
34 struct boardobj super;
35 u8 ch_idx;
36 u8 num_limit_inputs;
37 u8 limit_unit;
38 s32 limit_delta;
39 u32 limit_min;
40 u32 limit_rated;
41 u32 limit_max;
42 u32 limit_batt;
43 struct ctrl_pmgr_pwr_policy_info_integral integral;
44 struct ctrl_pmgr_pwr_policy_limit_arbitration limit_arb_min;
45 struct ctrl_pmgr_pwr_policy_limit_arbitration limit_arb_rated;
46 struct ctrl_pmgr_pwr_policy_limit_arbitration limit_arb_max;
47 struct ctrl_pmgr_pwr_policy_limit_arbitration limit_arb_batt;
48 struct ctrl_pmgr_pwr_policy_limit_arbitration limit_arb_curr;
49 u8 sample_mult;
50 enum ctrl_pmgr_pwr_policy_filter_type filter_type;
51 union ctrl_pmgr_pwr_policy_filter_param filter_param;
52};
53
54struct pwr_policy_ext_limit {
55 u8 policy_table_idx;
56 u32 limit;
57};
58
59struct pwr_policy_batt_workitem {
60 u32 power_state;
61 bool b_full_deflection;
62};
63
64struct pwr_policy_client_workitem {
65 u32 limit;
66 bool b_pending;
67};
68
69struct pwr_policy_relationship {
70 struct boardobj super;
71 u8 policy_idx;
72};
73
74struct pmgr_pwr_policy {
75 u8 version;
76 bool b_enabled;
77 struct nv_pmu_perf_domain_group_limits global_ceiling;
78 u8 policy_idxs[CTRL_PMGR_PWR_POLICY_IDX_NUM_INDEXES];
79 struct pwr_policy_ext_limit ext_limits[PWR_POLICY_EXT_POWER_STATE_ID_COUNT];
80 s32 ext_power_state;
81 u16 base_sample_period;
82 u16 min_client_sample_period;
83 u8 low_sampling_mult;
84 struct boardobjgrp_e32 pwr_policies;
85 struct boardobjgrp_e32 pwr_policy_rels;
86 struct boardobjgrp_e32 pwr_violations;
87 struct pwr_policy_client_workitem client_work_item;
88};
89
90struct pwr_policy_limit {
91 struct pwr_policy super;
92};
93
94struct pwr_policy_hw_threshold {
95 struct pwr_policy_limit super;
96 u8 threshold_idx;
97 u8 low_threshold_idx;
98 bool b_use_low_threshold;
99 u16 low_threshold_value;
100};
101
102union pwr_policy_data_union {
103 struct boardobj boardobj;
104 struct pwr_policy pwrpolicy;
105 struct pwr_policy_hw_threshold hw_threshold;
106};
107
108#define PMGR_GET_PWR_POLICY(g, policy_idx) \
109 ((struct pwr_policy *)BOARDOBJGRP_OBJ_GET_BY_IDX( \
110 &(g->pmgr_pmu.pmgr_policyobjs.pwr_policies.super), (policy_idx)))
111
112#define PMGR_PWR_POLICY_INCREMENT_LIMIT_INPUT_COUNT(ppolicy) \
113 ((ppolicy)->num_limit_inputs++)
114
115u32 pmgr_policy_sw_setup(struct gk20a *g);
116
117#endif
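
union pwr_policy_data_union lets devinit fill one buffer with either the base pwr_policy fields or the pwr_policy_hw_threshold superset, and construct_pwr_policy() then interprets it according to boardobj.type. A simplified sketch of that type-keyed parameter-union pattern follows; policy_params, base_params, hw_threshold_params and the POLICY_TYPE_HW_THRESHOLD value are made up for illustration and are not the nvgpu types.

/* Sketch of the type-keyed parameter union used with
 * union pwr_policy_data_union; the names and type value are invented. */
#include <stdio.h>

enum policy_type { POLICY_TYPE_HW_THRESHOLD = 1 };	/* illustrative value */

struct base_params {
	enum policy_type type;
	unsigned int limit_rated_mw;
};

struct hw_threshold_params {
	struct base_params super;	/* base fields come first, as in the union */
	unsigned char threshold_idx;
};

union policy_params {
	struct base_params base;
	struct hw_threshold_params hw_threshold;
};

static void construct_policy(const union policy_params *p)
{
	switch (p->base.type) {
	case POLICY_TYPE_HW_THRESHOLD:
		printf("hw_threshold: rated=%u mW, thres_idx=%u\n",
		       p->hw_threshold.super.limit_rated_mw,
		       (unsigned int)p->hw_threshold.threshold_idx);
		break;
	default:
		printf("unknown policy type %d\n", (int)p->base.type);
		break;
	}
}

int main(void)
{
	union policy_params p;

	p.hw_threshold.super.type = POLICY_TYPE_HW_THRESHOLD;
	p.hw_threshold.super.limit_rated_mw = 20000;
	p.hw_threshold.threshold_idx = 0;
	construct_policy(&p);
	return 0;
}
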
diff --git a/drivers/gpu/nvgpu/pstate/pstate.c b/drivers/gpu/nvgpu/pstate/pstate.c
index d6173275..da1a49db 100644
--- a/drivers/gpu/nvgpu/pstate/pstate.c
+++ b/drivers/gpu/nvgpu/pstate/pstate.c
@@ -16,6 +16,7 @@
16#include "gk20a/gk20a.h" 16#include "gk20a/gk20a.h"
17#include "clk/clk.h" 17#include "clk/clk.h"
18#include "perf/perf.h" 18#include "perf/perf.h"
19#include "pmgr/pmgr.h"
19 20
20/*sw setup for pstate components*/ 21/*sw setup for pstate components*/
21int gk20a_init_pstate_support(struct gk20a *g) 22int gk20a_init_pstate_support(struct gk20a *g)
@@ -49,6 +50,10 @@ int gk20a_init_pstate_support(struct gk20a *g)
49 return err; 50 return err;
50 51
51 err = clk_prog_sw_setup(g); 52 err = clk_prog_sw_setup(g);
53 if (err)
54 return err;
55
56 err = pmgr_domain_sw_setup(g);
52 return err; 57 return err;
53} 58}
54 59
@@ -100,6 +105,10 @@ int gk20a_init_pstate_pmu_support(struct gk20a *g)
100 return err; 105 return err;
101 106
102 err = clk_set_boot_fll_clk(g); 107 err = clk_set_boot_fll_clk(g);
108 if (err)
109 return err;
110
111 err = pmgr_domain_pmu_setup(g);
103 return err; 112 return err;
104} 113}
105 114
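
The pstate.c hunks insert an error check after each existing setup stage so that pmgr_domain_sw_setup() and pmgr_domain_pmu_setup() run only when the preceding stage succeeded. A trivial standalone sketch of that sequential early-return pattern is below; the stage_* functions are placeholders, not the nvgpu setup calls.

/* Minimal sketch of the sequential "run a stage, bail on error" pattern
 * that the pstate.c hunks extend; stage_* are placeholders. */
#include <stdio.h>

static int stage_clk(void)  { return 0; }
static int stage_perf(void) { return 0; }
static int stage_pmgr(void) { return 0; }	/* stand-in for pmgr_domain_sw_setup() */

static int init_all(void)
{
	int err;

	err = stage_clk();
	if (err)
		return err;	/* stop at the first failing stage */

	err = stage_perf();
	if (err)
		return err;

	return stage_pmgr();	/* last stage's status is the overall status */
}

int main(void)
{
	printf("init_all() = %d\n", init_all());
	return 0;
}
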