From 91390d857f6302f9c2923ec4188ea7e24ee537a2 Mon Sep 17 00:00:00 2001
From: Terje Bergstrom
Date: Thu, 9 Aug 2018 09:20:33 -0700
Subject: gpu: nvgpu: Move therm HAL to common

Move implementation of therm HAL to common/therm. ELCG and BLCG code
was embedded in gr HAL, so moved that code to therm. Bump gk20a code
to gm20b.

JIRA NVGPU-955

Change-Id: I9b03e52f2832d3a1d89071a577e8ce106aaf603b
Signed-off-by: Terje Bergstrom
Reviewed-on: https://git-master.nvidia.com/r/1795989
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/common/therm/therm.c       |  46 +++++++
 drivers/gpu/nvgpu/common/therm/therm_gm20b.c | 187 +++++++++++++++++++++++++++
 drivers/gpu/nvgpu/common/therm/therm_gm20b.h |  33 +++++
 drivers/gpu/nvgpu/common/therm/therm_gp106.c | 144 +++++++++++++++++++++
 drivers/gpu/nvgpu/common/therm/therm_gp106.h |  40 ++++++
 drivers/gpu/nvgpu/common/therm/therm_gp10b.c | 137 ++++++++++++++++++++
 drivers/gpu/nvgpu/common/therm/therm_gp10b.h |  29 +++++
 drivers/gpu/nvgpu/common/therm/therm_gv11b.c | 183 ++++++++++++++++++++++++++
 drivers/gpu/nvgpu/common/therm/therm_gv11b.h |  30 +++++
 9 files changed, 829 insertions(+)
 create mode 100644 drivers/gpu/nvgpu/common/therm/therm.c
 create mode 100644 drivers/gpu/nvgpu/common/therm/therm_gm20b.c
 create mode 100644 drivers/gpu/nvgpu/common/therm/therm_gm20b.h
 create mode 100644 drivers/gpu/nvgpu/common/therm/therm_gp106.c
 create mode 100644 drivers/gpu/nvgpu/common/therm/therm_gp106.h
 create mode 100644 drivers/gpu/nvgpu/common/therm/therm_gp10b.c
 create mode 100644 drivers/gpu/nvgpu/common/therm/therm_gp10b.h
 create mode 100644 drivers/gpu/nvgpu/common/therm/therm_gv11b.c
 create mode 100644 drivers/gpu/nvgpu/common/therm/therm_gv11b.h
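The diffs below add only the common implementations; the per-chip wiring of these
functions into gpu_ops is done in the HAL files (hal_gm20b.c and friends), which are
not part of this patch. As a rough sketch of how the moved code is expected to be
consumed, a chip HAL would point its therm ops at the new common functions roughly
as follows. The op names other than init_therm_setup_hw and therm_debugfs_init are
assumptions inferred from the calls in this patch, not taken from it:

    /* Hypothetical sketch -- the real assignments live in the per-chip
     * HAL setup (e.g. hal_gm20b.c), which this patch does not touch.
     */
    #include "gk20a/gk20a.h"
    #include "common/therm/therm_gm20b.h"

    static void example_gm20b_wire_therm_ops(struct gpu_ops *gops)
    {
            /* HW setup hook, called from nvgpu_init_therm_support() */
            gops->therm.init_therm_setup_hw = gm20b_init_therm_setup_hw;
            /* ELCG/BLCG helpers that used to be reached through the gr HAL */
            gops->therm.elcg_init_idle_filters = gm20b_elcg_init_idle_filters;
            gops->therm.init_elcg_mode = gm20b_therm_init_elcg_mode;
            gops->therm.init_blcg_mode = gm20b_therm_init_blcg_mode;
    }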
diff --git a/drivers/gpu/nvgpu/common/therm/therm.c b/drivers/gpu/nvgpu/common/therm/therm.c
new file mode 100644
index 00000000..cfe8a2c1
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/therm/therm.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include
+#include
+#include
+
+#include "gk20a/gk20a.h"
+
+int nvgpu_init_therm_support(struct gk20a *g)
+{
+        u32 err = 0U;
+
+        nvgpu_log_fn(g, " ");
+
+        if (g->ops.therm.init_therm_setup_hw)
+                err = g->ops.therm.init_therm_setup_hw(g);
+        if (err)
+                return err;
+
+#ifdef CONFIG_DEBUG_FS
+        if (g->ops.therm.therm_debugfs_init)
+                g->ops.therm.therm_debugfs_init(g);
+#endif
+
+        return err;
+}
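nvgpu_init_therm_support() treats both HAL hooks as optional: a chip that does not
provide init_therm_setup_hw simply succeeds, and the debugfs hook only exists under
CONFIG_DEBUG_FS. A minimal sketch of the intended call pattern from a power-on path
(the caller name and error handling here are illustrative, not taken from this patch):

    /* Illustrative only -- the real caller sits in the gk20a power-on
     * sequence, outside the scope of this patch.
     */
    static int example_poweron_therm_step(struct gk20a *g)
    {
            int err;

            err = nvgpu_init_therm_support(g);
            if (err != 0) {
                    nvgpu_err(g, "failed to init therm support: %d", err);
                    return err;
            }

            return 0;
    }

Keeping the hooks optional lets chips without a therm unit (or without debugfs) share
the same common entry point without stub functions.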
diff --git a/drivers/gpu/nvgpu/common/therm/therm_gm20b.c b/drivers/gpu/nvgpu/common/therm/therm_gm20b.c
new file mode 100644
index 00000000..023ec36a
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/therm/therm_gm20b.c
@@ -0,0 +1,187 @@
+/*
+ * GM20B THERMAL
+ *
+ * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include
+#include "gk20a/gk20a.h"
+
+#include "therm_gm20b.h"
+
+#include
+
+int gm20b_init_therm_setup_hw(struct gk20a *g)
+{
+        u32 v;
+
+        nvgpu_log_fn(g, " ");
+
+        /* program NV_THERM registers */
+        gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() |
+                therm_use_a_ext_therm_1_enable_f() |
+                therm_use_a_ext_therm_2_enable_f());
+        gk20a_writel(g, therm_evt_ext_therm_0_r(),
+                therm_evt_ext_therm_0_slow_factor_f(0x2));
+        gk20a_writel(g, therm_evt_ext_therm_1_r(),
+                therm_evt_ext_therm_1_slow_factor_f(0x6));
+        gk20a_writel(g, therm_evt_ext_therm_2_r(),
+                therm_evt_ext_therm_2_slow_factor_f(0xe));
+
+        gk20a_writel(g, therm_grad_stepping_table_r(0),
+                therm_grad_stepping_table_slowdown_factor0_f(therm_grad_stepping_table_slowdown_factor0_fpdiv_by1p5_f()) |
+                therm_grad_stepping_table_slowdown_factor1_f(therm_grad_stepping_table_slowdown_factor0_fpdiv_by2_f()) |
+                therm_grad_stepping_table_slowdown_factor2_f(therm_grad_stepping_table_slowdown_factor0_fpdiv_by4_f()) |
+                therm_grad_stepping_table_slowdown_factor3_f(therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f()) |
+                therm_grad_stepping_table_slowdown_factor4_f(therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f()));
+        gk20a_writel(g, therm_grad_stepping_table_r(1),
+                therm_grad_stepping_table_slowdown_factor0_f(therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f()) |
+                therm_grad_stepping_table_slowdown_factor1_f(therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f()) |
+                therm_grad_stepping_table_slowdown_factor2_f(therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f()) |
+                therm_grad_stepping_table_slowdown_factor3_f(therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f()) |
+                therm_grad_stepping_table_slowdown_factor4_f(therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f()));
+
+        v = gk20a_readl(g, therm_clk_timing_r(0));
+        v |= therm_clk_timing_grad_slowdown_enabled_f();
+        gk20a_writel(g, therm_clk_timing_r(0), v);
+
+        v = gk20a_readl(g, therm_config2_r());
+        v |= therm_config2_grad_enable_f(1);
+        v |= therm_config2_slowdown_factor_extended_f(1);
+        gk20a_writel(g, therm_config2_r(), v);
+
+        gk20a_writel(g, therm_grad_stepping1_r(),
+                therm_grad_stepping1_pdiv_duration_f(32));
+
+        v = gk20a_readl(g, therm_grad_stepping0_r());
+        v |= therm_grad_stepping0_feature_enable_f();
+        gk20a_writel(g, therm_grad_stepping0_r(), v);
+
+        return 0;
+}
+
+int gm20b_elcg_init_idle_filters(struct gk20a *g)
+{
+        u32 gate_ctrl, idle_filter;
+        u32 engine_id;
+        u32 active_engine_id = 0;
+        struct fifo_gk20a *f = &g->fifo;
+
+        nvgpu_log_fn(g, " ");
+
+        for (engine_id = 0; engine_id < f->num_engines; engine_id++) {
+                active_engine_id = f->active_engines_list[engine_id];
+                gate_ctrl = gk20a_readl(g, therm_gate_ctrl_r(active_engine_id));
+
+                if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
+                        gate_ctrl = set_field(gate_ctrl,
+                                therm_gate_ctrl_eng_delay_after_m(),
+                                therm_gate_ctrl_eng_delay_after_f(4));
+                }
+
+                /* 2 * (1 << 9) = 1024 clks */
+                gate_ctrl = set_field(gate_ctrl,
+                        therm_gate_ctrl_eng_idle_filt_exp_m(),
+                        therm_gate_ctrl_eng_idle_filt_exp_f(9));
+                gate_ctrl = set_field(gate_ctrl,
+                        therm_gate_ctrl_eng_idle_filt_mant_m(),
+                        therm_gate_ctrl_eng_idle_filt_mant_f(2));
+                gk20a_writel(g, therm_gate_ctrl_r(active_engine_id), gate_ctrl);
+        }
+
+        /* default fecs_idle_filter to 0 */
+        idle_filter = gk20a_readl(g, therm_fecs_idle_filter_r());
+        idle_filter &= ~therm_fecs_idle_filter_value_m();
+        gk20a_writel(g, therm_fecs_idle_filter_r(), idle_filter);
+        /* default hubmmu_idle_filter to 0 */
+        idle_filter = gk20a_readl(g, therm_hubmmu_idle_filter_r());
+        idle_filter &= ~therm_hubmmu_idle_filter_value_m();
+        gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter);
+
+        nvgpu_log_fn(g, "done");
+        return 0;
+}
+
+void gm20b_therm_init_blcg_mode(struct gk20a *g, u32 mode, u32 engine)
+{
+        u32 gate_ctrl;
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+                return;
+
+        gate_ctrl = gk20a_readl(g, therm_gate_ctrl_r(engine));
+
+        switch (mode) {
+        case BLCG_RUN:
+                gate_ctrl = set_field(gate_ctrl,
+                                therm_gate_ctrl_blk_clk_m(),
+                                therm_gate_ctrl_blk_clk_run_f());
+                break;
+        case BLCG_AUTO:
+                gate_ctrl = set_field(gate_ctrl,
+                                therm_gate_ctrl_blk_clk_m(),
+                                therm_gate_ctrl_blk_clk_auto_f());
+                break;
+        default:
+                nvgpu_err(g,
+                        "invalid blcg mode %d", mode);
+                return;
+        }
+
+        gk20a_writel(g, therm_gate_ctrl_r(engine), gate_ctrl);
+}
+
+void gm20b_therm_init_elcg_mode(struct gk20a *g, u32 mode, u32 engine)
+{
+        u32 gate_ctrl;
+
+        gate_ctrl = gk20a_readl(g, therm_gate_ctrl_r(engine));
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_ELCG))
+                return;
+
+        switch (mode) {
+        case ELCG_RUN:
+                gate_ctrl = set_field(gate_ctrl,
+                                therm_gate_ctrl_eng_clk_m(),
+                                therm_gate_ctrl_eng_clk_run_f());
+                gate_ctrl = set_field(gate_ctrl,
+                                therm_gate_ctrl_eng_pwr_m(),
+                                /* set elpg to auto to meet hw expectation */
+                                therm_gate_ctrl_eng_pwr_auto_f());
+                break;
+        case ELCG_STOP:
+                gate_ctrl = set_field(gate_ctrl,
+                                therm_gate_ctrl_eng_clk_m(),
+                                therm_gate_ctrl_eng_clk_stop_f());
+                break;
+        case ELCG_AUTO:
+                gate_ctrl = set_field(gate_ctrl,
+                                therm_gate_ctrl_eng_clk_m(),
+                                therm_gate_ctrl_eng_clk_auto_f());
+                break;
+        default:
+                nvgpu_err(g,
+                        "invalid elcg mode %d", mode);
+        }
+
+        gk20a_writel(g, therm_gate_ctrl_r(engine), gate_ctrl);
+}
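Before this move the gr HAL walked the active engine list and flipped these gate
controls itself; with the code in therm, callers are expected to reach it through the
HAL. A hedged sketch of such a caller, using the ELCG_*/BLCG_* mode constants that
already exist in nvgpu and the op names assumed in the sketch after the commit
message (the real gating callers are not shown in this patch):

    /* Sketch: request automatic engine-level and block-level clock gating
     * on every active engine.
     */
    static void example_enable_auto_gating(struct gk20a *g)
    {
            struct fifo_gk20a *f = &g->fifo;
            u32 i;

            for (i = 0; i < f->num_engines; i++) {
                    u32 engine = f->active_engines_list[i];

                    if (g->ops.therm.init_elcg_mode)
                            g->ops.therm.init_elcg_mode(g, ELCG_AUTO, engine);
                    if (g->ops.therm.init_blcg_mode)
                            g->ops.therm.init_blcg_mode(g, BLCG_AUTO, engine);
            }
    }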
diff --git a/drivers/gpu/nvgpu/common/therm/therm_gm20b.h b/drivers/gpu/nvgpu/common/therm/therm_gm20b.h
new file mode 100644
index 00000000..b6dfc5b6
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/therm/therm_gm20b.h
@@ -0,0 +1,33 @@
+/*
+ * GM20B THERMAL
+ *
+ * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef THERM_GM20B_H
+#define THERM_GM20B_H
+
+struct gk20a;
+int gm20b_init_therm_setup_hw(struct gk20a *g);
+int gm20b_elcg_init_idle_filters(struct gk20a *g);
+void gm20b_therm_init_elcg_mode(struct gk20a *g, u32 mode, u32 engine);
+void gm20b_therm_init_blcg_mode(struct gk20a *g, u32 mode, u32 engine);
+
+#endif /* THERM_GM20B_H */
diff --git a/drivers/gpu/nvgpu/common/therm/therm_gp106.c b/drivers/gpu/nvgpu/common/therm/therm_gp106.c
new file mode 100644
index 00000000..1f82aa7a
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/therm/therm_gp106.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include
+#include "gk20a/gk20a.h"
+
+#include "therm_gp106.h"
+#include "therm/thrmpmu.h"
+
+#ifdef CONFIG_DEBUG_FS
+#include
+#include "os/linux/os_linux.h"
+#endif
+
+#include
+
+#include
+
+void gp106_get_internal_sensor_limits(s32 *max_24_8, s32 *min_24_8)
+{
+        *max_24_8 = (0x87 << 8);
+        *min_24_8 = (((u32)-216) << 8);
+}
+
+int gp106_get_internal_sensor_curr_temp(struct gk20a *g, u32 *temp_f24_8)
+{
+        int err = 0;
+        u32 readval;
+
+        readval = gk20a_readl(g, therm_temp_sensor_tsense_r());
+
+        if (!(therm_temp_sensor_tsense_state_v(readval) &
+                therm_temp_sensor_tsense_state_valid_v())) {
+                nvgpu_err(g,
+                        "Attempt to read temperature while sensor is OFF!");
+                err = -EINVAL;
+        } else if (therm_temp_sensor_tsense_state_v(readval) &
+                therm_temp_sensor_tsense_state_shadow_v()) {
+                nvgpu_err(g, "Reading temperature from SHADOWed sensor!");
+        }
+
+        // Convert from F9.5 -> F27.5 -> F24.8.
+        readval &= therm_temp_sensor_tsense_fixed_point_m();
+
+        *temp_f24_8 = readval;
+
+        return err;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int therm_get_internal_sensor_curr_temp(void *data, u64 *val)
+{
+        struct gk20a *g = (struct gk20a *)data;
+        u32 readval;
+        int err;
+
+        err = gp106_get_internal_sensor_curr_temp(g, &readval);
+        if (!err)
+                *val = readval;
+
+        return err;
+}
+DEFINE_SIMPLE_ATTRIBUTE(therm_ctrl_fops, therm_get_internal_sensor_curr_temp, NULL, "%llu\n");
+
+void gp106_therm_debugfs_init(struct gk20a *g)
+{
+        struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
+        struct dentry *dbgentry;
+
+        dbgentry = debugfs_create_file(
+                "temp", S_IRUGO, l->debugfs, g, &therm_ctrl_fops);
+        if (!dbgentry)
+                nvgpu_err(g, "debugfs entry create failed for therm_curr_temp");
+}
+#endif
+
+int gp106_elcg_init_idle_filters(struct gk20a *g)
+{
+        u32 gate_ctrl, idle_filter;
+        u32 engine_id;
+        u32 active_engine_id = 0;
+        struct fifo_gk20a *f = &g->fifo;
+
+        nvgpu_log_fn(g, " ");
+
+        for (engine_id = 0; engine_id < f->num_engines; engine_id++) {
+                active_engine_id = f->active_engines_list[engine_id];
+                gate_ctrl = gk20a_readl(g, therm_gate_ctrl_r(active_engine_id));
+
+                gate_ctrl = set_field(gate_ctrl,
+                        therm_gate_ctrl_eng_idle_filt_exp_m(),
+                        therm_gate_ctrl_eng_idle_filt_exp_f(2));
+                gate_ctrl = set_field(gate_ctrl,
+                        therm_gate_ctrl_eng_idle_filt_mant_m(),
+                        therm_gate_ctrl_eng_idle_filt_mant_f(1));
+                gate_ctrl = set_field(gate_ctrl,
+                        therm_gate_ctrl_eng_delay_before_m(),
+                        therm_gate_ctrl_eng_delay_before_f(0));
+                gk20a_writel(g, therm_gate_ctrl_r(active_engine_id), gate_ctrl);
+        }
+
+        /* default fecs_idle_filter to 0 */
+        idle_filter = gk20a_readl(g, therm_fecs_idle_filter_r());
+        idle_filter &= ~therm_fecs_idle_filter_value_m();
+        gk20a_writel(g, therm_fecs_idle_filter_r(), idle_filter);
+        /* default hubmmu_idle_filter to 0 */
+        idle_filter = gk20a_readl(g, therm_hubmmu_idle_filter_r());
+        idle_filter &= ~therm_hubmmu_idle_filter_value_m();
+        gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter);
+
+        nvgpu_log_fn(g, "done");
+        return 0;
+}
+
+u32 gp106_configure_therm_alert(struct gk20a *g, s32 curr_warn_temp)
+{
+        u32 err = 0;
+
+        if (g->curr_warn_temp != curr_warn_temp) {
+                g->curr_warn_temp = curr_warn_temp;
+                err = therm_configure_therm_alert(g);
+        }
+
+        return err;
+}
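gp106_get_internal_sensor_curr_temp() reports the temperature as a 24.8 fixed-point
value: the low 8 bits are the fractional part, as the temp_f24_8 naming and the
sensor limits (-216 to 0x87 degrees, both shifted left by 8) indicate. A
self-contained illustration of how a consumer could turn such a value into
millidegrees Celsius; the rounding and sample value are illustrative only:

    #include <stdio.h>
    #include <stdint.h>

    /* Convert a 24.8 fixed-point temperature to millidegrees Celsius.
     * (Negative readings, down to -216 C per the limits above, would
     * additionally need sign-aware formatting below.)
     */
    static int32_t f24_8_to_millicelsius(int32_t temp_f24_8)
    {
            /* 1/256 of a degree per LSB, scaled to 1/1000 of a degree. */
            return (int32_t)(((int64_t)temp_f24_8 * 1000) / 256);
    }

    int main(void)
    {
            int32_t sample = 0x2880; /* 40.5 C in 24.8 fixed point */
            int32_t mc = f24_8_to_millicelsius(sample);

            printf("%d.%03u C\n", mc / 1000, (unsigned int)(mc % 1000));
            return 0;
    }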
diff --git a/drivers/gpu/nvgpu/common/therm/therm_gp106.h b/drivers/gpu/nvgpu/common/therm/therm_gp106.h
new file mode 100644
index 00000000..a92c2e0f
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/therm/therm_gp106.h
@@ -0,0 +1,40 @@
+/*
+ * general thermal control structures & definitions
+ *
+ * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef NVGPU_THERM_GP106_H
+#define NVGPU_THERM_GP106_H
+
+#include
+
+struct gk20a;
+
+void gp106_get_internal_sensor_limits(s32 *max_24_8, s32 *min_24_8);
+int gp106_get_internal_sensor_curr_temp(struct gk20a *g, u32 *temp_f24_8);
+#ifdef CONFIG_DEBUG_FS
+void gp106_therm_debugfs_init(struct gk20a *g);
+#endif
+int gp106_elcg_init_idle_filters(struct gk20a *g);
+u32 gp106_configure_therm_alert(struct gk20a *g, s32 curr_warn_temp);
+
+#endif
diff --git a/drivers/gpu/nvgpu/common/therm/therm_gp10b.c b/drivers/gpu/nvgpu/common/therm/therm_gp10b.c
new file mode 100644
index 00000000..905ff178
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/therm/therm_gp10b.c
@@ -0,0 +1,137 @@
+/*
+ * GP10B Therm
+ *
+ * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "gk20a/gk20a.h"
+#include "therm_gp10b.h"
+
+#include
+#include
+#include
+
+#include
+
+int gp10b_init_therm_setup_hw(struct gk20a *g)
+{
+        u32 v;
+
+        nvgpu_log_fn(g, " ");
+
+        /* program NV_THERM registers */
+        gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() |
+                therm_use_a_ext_therm_1_enable_f() |
+                therm_use_a_ext_therm_2_enable_f());
+        gk20a_writel(g, therm_evt_ext_therm_0_r(),
+                therm_evt_ext_therm_0_slow_factor_f(0x2));
+        gk20a_writel(g, therm_evt_ext_therm_1_r(),
+                therm_evt_ext_therm_1_slow_factor_f(0x6));
+        gk20a_writel(g, therm_evt_ext_therm_2_r(),
+                therm_evt_ext_therm_2_slow_factor_f(0xe));
+
+        gk20a_writel(g, therm_grad_stepping_table_r(0),
+                therm_grad_stepping_table_slowdown_factor0_f(
+                        therm_grad_stepping_table_slowdown_factor0_fpdiv_by1p5_f()) |
+                therm_grad_stepping_table_slowdown_factor1_f(
+                        therm_grad_stepping_table_slowdown_factor0_fpdiv_by2_f()) |
+                therm_grad_stepping_table_slowdown_factor2_f(
+                        therm_grad_stepping_table_slowdown_factor0_fpdiv_by4_f()) |
+                therm_grad_stepping_table_slowdown_factor3_f(
+                        therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f()) |
+                therm_grad_stepping_table_slowdown_factor4_f(
+                        therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f()));
+
+        gk20a_writel(g, therm_grad_stepping_table_r(1),
+                therm_grad_stepping_table_slowdown_factor0_f(
+                        therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f()) |
+                therm_grad_stepping_table_slowdown_factor1_f(
+                        therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f()) |
+                therm_grad_stepping_table_slowdown_factor2_f(
+                        therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f()) |
+                therm_grad_stepping_table_slowdown_factor3_f(
+                        therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f()) |
+                therm_grad_stepping_table_slowdown_factor4_f(
+                        therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f()));
+
+        v = gk20a_readl(g, therm_clk_timing_r(0));
+        v |= therm_clk_timing_grad_slowdown_enabled_f();
+        gk20a_writel(g, therm_clk_timing_r(0), v);
+
+        v = gk20a_readl(g, therm_config2_r());
+        v |= therm_config2_grad_enable_f(1);
+        v |= therm_config2_slowdown_factor_extended_f(1);
+        gk20a_writel(g, therm_config2_r(), v);
+
+        gk20a_writel(g, therm_grad_stepping1_r(),
+                therm_grad_stepping1_pdiv_duration_f(32));
+
+        v = gk20a_readl(g, therm_grad_stepping0_r());
+        v |= therm_grad_stepping0_feature_enable_f();
+        gk20a_writel(g, therm_grad_stepping0_r(), v);
+
+        return 0;
+}
+
+int gp10b_elcg_init_idle_filters(struct gk20a *g)
+{
+        u32 gate_ctrl, idle_filter;
+        u32 engine_id;
+        u32 active_engine_id = 0;
+        struct fifo_gk20a *f = &g->fifo;
+
+        nvgpu_log_fn(g, " ");
+
+        for (engine_id = 0; engine_id < f->num_engines; engine_id++) {
+                active_engine_id = f->active_engines_list[engine_id];
+                gate_ctrl = gk20a_readl(g, therm_gate_ctrl_r(active_engine_id));
+
+                if (nvgpu_platform_is_simulation(g)) {
+                        gate_ctrl = set_field(gate_ctrl,
+                                therm_gate_ctrl_eng_delay_after_m(),
+                                therm_gate_ctrl_eng_delay_after_f(4));
+                }
+
+                /* 2 * (1 << 9) = 1024 clks */
+                gate_ctrl = set_field(gate_ctrl,
+                        therm_gate_ctrl_eng_idle_filt_exp_m(),
+                        therm_gate_ctrl_eng_idle_filt_exp_f(9));
+                gate_ctrl = set_field(gate_ctrl,
+                        therm_gate_ctrl_eng_idle_filt_mant_m(),
+                        therm_gate_ctrl_eng_idle_filt_mant_f(2));
+                gate_ctrl = set_field(gate_ctrl,
+                        therm_gate_ctrl_eng_delay_before_m(),
+                        therm_gate_ctrl_eng_delay_before_f(4));
+                gk20a_writel(g, therm_gate_ctrl_r(active_engine_id), gate_ctrl);
+        }
+
+        /* default fecs_idle_filter to 0 */
+        idle_filter = gk20a_readl(g, therm_fecs_idle_filter_r());
+        idle_filter &= ~therm_fecs_idle_filter_value_m();
+        gk20a_writel(g, therm_fecs_idle_filter_r(), idle_filter);
+        /* default hubmmu_idle_filter to 0 */
+        idle_filter = gk20a_readl(g, therm_hubmmu_idle_filter_r());
+        idle_filter &= ~therm_hubmmu_idle_filter_value_m();
+        gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter);
+
+        nvgpu_log_fn(g, "done");
+        return 0;
+}
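The idle filter fields form a small floating-point style delay: the in-code comment
"2 * (1 << 9) = 1024 clks" suggests the engine must be idle for roughly
mantissa * 2^exponent clock cycles before gating engages. A self-contained check of
that arithmetic for the values programmed on the different chips (the exact hardware
semantics are an assumption based on that comment, not on documentation in this
patch):

    #include <stdio.h>

    /* Idle filter delay implied by the "mant * (1 << exp)" comment above. */
    static unsigned int idle_filter_clks(unsigned int mant, unsigned int exp)
    {
            return mant * (1u << exp);
    }

    int main(void)
    {
            /* gm20b/gp10b engine filter: mant=2, exp=9 -> 1024 clks */
            printf("gm20b/gp10b: %u clks\n", idle_filter_clks(2, 9));
            /* gp106 engine filter: mant=1, exp=2 -> 4 clks */
            printf("gp106:       %u clks\n", idle_filter_clks(1, 2));
            return 0;
    }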
diff --git a/drivers/gpu/nvgpu/common/therm/therm_gp10b.h b/drivers/gpu/nvgpu/common/therm/therm_gp10b.h
new file mode 100644
index 00000000..2a40b73c
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/therm/therm_gp10b.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef THERM_GP10B_H
+#define THERM_GP10B_H
+
+struct gk20a;
+int gp10b_init_therm_setup_hw(struct gk20a *g);
+int gp10b_elcg_init_idle_filters(struct gk20a *g);
+
+#endif /* THERM_GP10B_H */
diff --git a/drivers/gpu/nvgpu/common/therm/therm_gv11b.c b/drivers/gpu/nvgpu/common/therm/therm_gv11b.c
new file mode 100644
index 00000000..77edd7e1
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/therm/therm_gv11b.c
@@ -0,0 +1,183 @@
+/*
+ * GV11B Therm
+ *
+ * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "gk20a/gk20a.h"
+#include "therm_gv11b.h"
+
+#include
+#include
+#include
+
+#include
+
+#include "therm_gv11b.h"
+
+int gv11b_init_therm_setup_hw(struct gk20a *g)
+{
+        u32 v;
+
+        nvgpu_log_fn(g, " ");
+
+        /* program NV_THERM registers */
+        gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() |
+                therm_use_a_ext_therm_1_enable_f() |
+                therm_use_a_ext_therm_2_enable_f());
+        gk20a_writel(g, therm_evt_ext_therm_0_r(),
+                therm_evt_ext_therm_0_slow_factor_f(0x2));
+        gk20a_writel(g, therm_evt_ext_therm_1_r(),
+                therm_evt_ext_therm_1_slow_factor_f(0x6));
+        gk20a_writel(g, therm_evt_ext_therm_2_r(),
+                therm_evt_ext_therm_2_slow_factor_f(0xe));
+
+        gk20a_writel(g, therm_grad_stepping_table_r(0),
+                therm_grad_stepping_table_slowdown_factor0_f(
+                        therm_grad_stepping_table_slowdown_factor0_fpdiv_by1_f()) |
+                therm_grad_stepping_table_slowdown_factor1_f(
+                        therm_grad_stepping_table_slowdown_factor0_fpdiv_by1p5_f()) |
+                therm_grad_stepping_table_slowdown_factor2_f(
+                        therm_grad_stepping_table_slowdown_factor0_fpdiv_by2_f()) |
+                therm_grad_stepping_table_slowdown_factor3_f(
+                        therm_grad_stepping_table_slowdown_factor0_fpdiv_by4_f()) |
+                therm_grad_stepping_table_slowdown_factor4_f(
+                        therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f()));
+
+        gk20a_writel(g, therm_grad_stepping_table_r(1),
+                therm_grad_stepping_table_slowdown_factor0_f(
+                        therm_grad_stepping_table_slowdown_factor0_fpdiv_by16_f()) |
+                therm_grad_stepping_table_slowdown_factor1_f(
+                        therm_grad_stepping_table_slowdown_factor0_fpdiv_by32_f()) |
+                therm_grad_stepping_table_slowdown_factor2_f(
+                        therm_grad_stepping_table_slowdown_factor0_fpdiv_by32_f()) |
+                therm_grad_stepping_table_slowdown_factor3_f(
+                        therm_grad_stepping_table_slowdown_factor0_fpdiv_by32_f()) |
+                therm_grad_stepping_table_slowdown_factor4_f(
+                        therm_grad_stepping_table_slowdown_factor0_fpdiv_by32_f()));
+
+        v = gk20a_readl(g, therm_clk_timing_r(0));
+        v |= therm_clk_timing_grad_slowdown_enabled_f();
+        gk20a_writel(g, therm_clk_timing_r(0), v);
+
+        v = gk20a_readl(g, therm_config2_r());
+        v |= therm_config2_grad_enable_f(1);
+        v |= therm_config2_slowdown_factor_extended_f(1);
+        v = set_field(v, therm_config2_grad_step_duration_m(),
+                therm_config2_grad_step_duration_f(0));
+        gk20a_writel(g, therm_config2_r(), v);
+
+        gk20a_writel(g, therm_grad_stepping1_r(),
+                therm_grad_stepping1_pdiv_duration_f(0xbf4));
+
+        v = gk20a_readl(g, therm_grad_stepping0_r());
+        v |= therm_grad_stepping0_feature_enable_f();
+        gk20a_writel(g, therm_grad_stepping0_r(), v);
+
+        /* disable idle clock slowdown */
+        v = therm_clk_slowdown_2_idle_condition_a_select_f(0) |
+                therm_clk_slowdown_2_idle_condition_a_type_never_f() |
+                therm_clk_slowdown_2_idle_condition_b_type_never_f();
+        gk20a_writel(g, therm_clk_slowdown_2_r(0), v);
+
+        return 0;
+}
+
+void gv11b_therm_init_elcg_mode(struct gk20a *g, u32 mode, u32 engine)
+{
+        u32 gate_ctrl;
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_ELCG))
+                return;
+
+        gate_ctrl = gk20a_readl(g, therm_gate_ctrl_r(engine));
+
+        switch (mode) {
+        case ELCG_RUN:
+                gate_ctrl = set_field(gate_ctrl,
+                                therm_gate_ctrl_eng_clk_m(),
+                                therm_gate_ctrl_eng_clk_run_f());
+                gate_ctrl = set_field(gate_ctrl,
+                                therm_gate_ctrl_idle_holdoff_m(),
+                                therm_gate_ctrl_idle_holdoff_on_f());
+                break;
+        case ELCG_STOP:
+                gate_ctrl = set_field(gate_ctrl,
+                                therm_gate_ctrl_eng_clk_m(),
+                                therm_gate_ctrl_eng_clk_stop_f());
+                break;
+        case ELCG_AUTO:
+                gate_ctrl = set_field(gate_ctrl,
+                                therm_gate_ctrl_eng_clk_m(),
+                                therm_gate_ctrl_eng_clk_auto_f());
+                break;
+        default:
+                nvgpu_err(g, "invalid elcg mode %d", mode);
+        }
+
+        gk20a_writel(g, therm_gate_ctrl_r(engine), gate_ctrl);
+}
+
+int gv11b_elcg_init_idle_filters(struct gk20a *g)
+{
+        u32 gate_ctrl, idle_filter;
+        u32 engine_id;
+        u32 active_engine_id = 0;
+        struct fifo_gk20a *f = &g->fifo;
+
+        if (nvgpu_platform_is_simulation(g))
+                return 0;
+
+        nvgpu_log_info(g, "init clock/power gate reg");
+
+        for (engine_id = 0; engine_id < f->num_engines; engine_id++) {
+                active_engine_id = f->active_engines_list[engine_id];
+
+                gate_ctrl = gk20a_readl(g, therm_gate_ctrl_r(active_engine_id));
+                gate_ctrl = set_field(gate_ctrl,
+                        therm_gate_ctrl_eng_idle_filt_exp_m(),
+                        therm_gate_ctrl_eng_idle_filt_exp__prod_f());
+                gate_ctrl = set_field(gate_ctrl,
+                        therm_gate_ctrl_eng_idle_filt_mant_m(),
+                        therm_gate_ctrl_eng_idle_filt_mant__prod_f());
+                gate_ctrl = set_field(gate_ctrl,
+                        therm_gate_ctrl_eng_delay_before_m(),
+                        therm_gate_ctrl_eng_delay_before__prod_f());
+                gate_ctrl = set_field(gate_ctrl,
+                        therm_gate_ctrl_eng_delay_after_m(),
+                        therm_gate_ctrl_eng_delay_after__prod_f());
+                gk20a_writel(g, therm_gate_ctrl_r(active_engine_id), gate_ctrl);
+        }
+
+        idle_filter = gk20a_readl(g, therm_fecs_idle_filter_r());
+        idle_filter = set_field(idle_filter,
+                therm_fecs_idle_filter_value_m(),
+                therm_fecs_idle_filter_value__prod_f());
+        gk20a_writel(g, therm_fecs_idle_filter_r(), idle_filter);
+
+        idle_filter = gk20a_readl(g, therm_hubmmu_idle_filter_r());
+        idle_filter = set_field(idle_filter,
+                therm_hubmmu_idle_filter_value_m(),
+                therm_hubmmu_idle_filter_value__prod_f());
+        gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter);
+
+        return 0;
+}
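Compared to gm20b/gp10b, the gv11b gradual-slowdown tables ramp more gently: table 0
steps through divide-by-1, 1.5, 2, 4, 8 and table 1 continues with 16 and 32, reading
the fpdiv_by* field names literally (the precise hardware behaviour of gradual
slowdown is not spelled out in this patch). A quick standalone illustration of the
effective GPC clock at each step, assuming a nominal 1 GHz clock:

    #include <stdio.h>

    int main(void)
    {
            /* Divider sequence implied by the fpdiv_by* fields used above. */
            const double dividers[] = { 1.0, 1.5, 2.0, 4.0, 8.0, 16.0, 32.0 };
            const double nominal_mhz = 1000.0; /* assumed nominal clock */
            unsigned int i;

            for (i = 0; i < sizeof(dividers) / sizeof(dividers[0]); i++)
                    printf("step %u: /%-4.1f -> %.1f MHz\n",
                           i, dividers[i], nominal_mhz / dividers[i]);
            return 0;
    }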
diff --git a/drivers/gpu/nvgpu/common/therm/therm_gv11b.h b/drivers/gpu/nvgpu/common/therm/therm_gv11b.h
new file mode 100644
index 00000000..7058af9f
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/therm/therm_gv11b.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef THERM_GV11B_H
+#define THERM_GV11B_H
+
+struct gk20a;
+int gv11b_elcg_init_idle_filters(struct gk20a *g);
+int gv11b_init_therm_setup_hw(struct gk20a *g);
+void gv11b_therm_init_elcg_mode(struct gk20a *g, u32 mode, u32 engine);
+
+#endif /* THERM_GV11B_H */