From e62785190f74cfbf9003a190a768e9077373bf6f Mon Sep 17 00:00:00 2001
From: Terje Bergstrom
Date: Fri, 10 Aug 2018 08:28:23 -0700
Subject: gpu: nvgpu: Move priv_ring HAL to common

Move implementation of the priv_ring HAL to common/priv_ring. Implement
two new HAL APIs to remove illegal dependencies: enable_priv_ring and
enum_ltc. As enum_ltc can be implemented only from gm20b onwards, bump
the gk20a implementation to be based on gm20b.

JIRA NVGPU-964

Change-Id: I160c2216132aadbcd98bb4a688aeeb2c520a9bc0
Signed-off-by: Terje Bergstrom
Reviewed-on: https://git-master.nvidia.com/r/1797025
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 .../gpu/nvgpu/common/priv_ring/priv_ring_gm20b.c   | 121 +++++++++++++++++++++
 1 file changed, 121 insertions(+)
 create mode 100644 drivers/gpu/nvgpu/common/priv_ring/priv_ring_gm20b.c

diff --git a/drivers/gpu/nvgpu/common/priv_ring/priv_ring_gm20b.c b/drivers/gpu/nvgpu/common/priv_ring/priv_ring_gm20b.c
new file mode 100644
index 00000000..1445473a
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/priv_ring/priv_ring_gm20b.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "gk20a/gk20a.h"
+
+#include <nvgpu/log.h>
+#include <nvgpu/timers.h>
+#include <nvgpu/enabled.h>
+#include <nvgpu/io.h>
+#include <nvgpu/utils.h>
+
+#include "priv_ring_gm20b.h"
+
+#include <nvgpu/hw/gm20b/hw_pri_ringmaster_gm20b.h>
+#include <nvgpu/hw/gm20b/hw_pri_ringstation_sys_gm20b.h>
+#include <nvgpu/hw/gm20b/hw_pri_ringstation_gpc_gm20b.h>
+
+void gm20b_priv_ring_enable(struct gk20a *g)
+{
+	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
+		return;
+
+	nvgpu_log(g, gpu_dbg_info, "enabling priv ring");
+
+	if (g->ops.clock_gating.slcg_priring_load_gating_prod)
+		g->ops.clock_gating.slcg_priring_load_gating_prod(g,
+				g->slcg_enabled);
+
+	gk20a_writel(g, pri_ringmaster_command_r(),
+			0x4);
+
+	gk20a_writel(g, pri_ringstation_sys_decode_config_r(),
+			0x2);
+	gk20a_readl(g, pri_ringstation_sys_decode_config_r());
+}
+
+void gm20b_priv_ring_isr(struct gk20a *g)
+{
+	u32 status0, status1;
+	u32 cmd;
+	s32 retry = 100;
+	u32 gpc;
+	u32 gpc_priv_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_PRIV_STRIDE);
+
+	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
+		return;
+
+	status0 = gk20a_readl(g, pri_ringmaster_intr_status0_r());
+	status1 = gk20a_readl(g, pri_ringmaster_intr_status1_r());
+
+	nvgpu_log(g, gpu_dbg_intr, "ringmaster intr status0: 0x%08x,"
+		"status1: 0x%08x", status0, status1);
+
+	if (pri_ringmaster_intr_status0_gbl_write_error_sys_v(status0) != 0) {
+		nvgpu_log(g, gpu_dbg_intr, "SYS write error. ADR %08x WRDAT %08x INFO %08x, CODE %08x",
+			gk20a_readl(g, pri_ringstation_sys_priv_error_adr_r()),
+			gk20a_readl(g, pri_ringstation_sys_priv_error_wrdat_r()),
+			gk20a_readl(g, pri_ringstation_sys_priv_error_info_r()),
+			gk20a_readl(g, pri_ringstation_sys_priv_error_code_r()));
+	}
+
+	for (gpc = 0; gpc < g->gr.gpc_count; gpc++) {
+		if (status1 & BIT(gpc)) {
+			nvgpu_log(g, gpu_dbg_intr, "GPC%u write error. ADR %08x WRDAT %08x INFO %08x, CODE %08x", gpc,
+				gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_adr_r() + gpc * gpc_priv_stride),
+				gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_wrdat_r() + gpc * gpc_priv_stride),
+				gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_info_r() + gpc * gpc_priv_stride),
+				gk20a_readl(g, pri_ringstation_gpc_gpc0_priv_error_code_r() + gpc * gpc_priv_stride));
+		}
+	}
+	/* clear interrupt */
+	cmd = gk20a_readl(g, pri_ringmaster_command_r());
+	cmd = set_field(cmd, pri_ringmaster_command_cmd_m(),
+		pri_ringmaster_command_cmd_ack_interrupt_f());
+	gk20a_writel(g, pri_ringmaster_command_r(), cmd);
+	/* poll for clear interrupt done */
+	cmd = pri_ringmaster_command_cmd_v(
+		gk20a_readl(g, pri_ringmaster_command_r()));
+	while (cmd != pri_ringmaster_command_cmd_no_cmd_v() && retry) {
+		nvgpu_udelay(20);
+		retry--;
+		cmd = pri_ringmaster_command_cmd_v(
+			gk20a_readl(g, pri_ringmaster_command_r()));
+	}
+	if (retry == 0 && cmd != pri_ringmaster_command_cmd_no_cmd_v())
+		nvgpu_warn(g, "priv ringmaster intr ack too many retries");
+}
+
+void gm20b_priv_set_timeout_settings(struct gk20a *g)
+{
+	/*
+	 * Bug 1340570: increase the clock timeout to avoid potential
+	 * operation failure at high gpcclk rate. Default values are 0x400.
+	 */
+	nvgpu_writel(g, pri_ringstation_sys_master_config_r(0x15), 0x800);
+	nvgpu_writel(g, pri_ringstation_gpc_master_config_r(0xa), 0x800);
+}
+
+u32 gm20b_priv_ring_enum_ltc(struct gk20a *g)
+{
+	return gk20a_readl(g, pri_ringmaster_enum_ltc_r());
+}
--
cgit v1.2.2
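
Note on usage: the commit message names two new HAL entry points, enable_priv_ring
and enum_ltc, which chip code is expected to reach through the per-chip HAL table
rather than by calling the gm20b functions (or reading pri_ringmaster registers)
directly. The snippet below is a minimal sketch of how a chip's HAL init could hook
up the functions added in this file; the g->ops.priv_ring member names and the
example function name are illustrative assumptions, not the exact fields defined in
gk20a.h.

	/*
	 * Sketch only: hook the gm20b priv_ring implementation into the
	 * per-chip HAL. The priv_ring member names used here are assumed
	 * for illustration; consult struct gpu_ops in gk20a/gk20a.h for
	 * the real layout.
	 */
	#include "gk20a/gk20a.h"
	#include "common/priv_ring/priv_ring_gm20b.h"

	static void example_gm20b_init_priv_ring_hal(struct gk20a *g)
	{
		g->ops.priv_ring.enable_priv_ring = gm20b_priv_ring_enable;
		g->ops.priv_ring.isr = gm20b_priv_ring_isr;
		g->ops.priv_ring.set_ppriv_timeout_settings =
						gm20b_priv_set_timeout_settings;
		g->ops.priv_ring.enum_ltc = gm20b_priv_ring_enum_ltc;
	}

With enum_ltc exposed this way, code outside common/priv_ring (LTC setup, for
example) can read the enumeration via g->ops.priv_ring.enum_ltc(g) instead of
including the pri_ringmaster headers itself, which is the kind of illegal
dependency this change removes.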