From 974d541623929fa2622d27d5d338a5b63596794b Mon Sep 17 00:00:00 2001
From: Terje Bergstrom
Date: Mon, 13 Aug 2018 12:58:18 -0700
Subject: gpu: nvgpu: Move ltc HAL to common

Move implementation of ltc HAL to common/ltc.

JIRA NVGPU-956

Change-Id: Id78d74e8612d7dacfb8d322d491abecd798e42b5
Signed-off-by: Terje Bergstrom
Reviewed-on: https://git-master.nvidia.com/r/1798461
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/Makefile                    |   8 +-
 drivers/gpu/nvgpu/Makefile.sources            |   8 +-
 drivers/gpu/nvgpu/common/ltc.c                |  54 ---
 drivers/gpu/nvgpu/common/ltc/ltc.c            |  54 +++
 drivers/gpu/nvgpu/common/ltc/ltc_gm20b.c      | 572 ++++++++++++++++++++++++++
 drivers/gpu/nvgpu/common/ltc/ltc_gm20b.h      |  66 +++
 drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c      | 320 ++++++++++++++
 drivers/gpu/nvgpu/common/ltc/ltc_gp10b.h      |  35 ++
 drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c      | 207 ++++++++++
 drivers/gpu/nvgpu/common/ltc/ltc_gv11b.h      |  34 ++
 drivers/gpu/nvgpu/gk20a/gk20a.h               |  18 +-
 drivers/gpu/nvgpu/gk20a/gr_gk20a.c            |  11 +-
 drivers/gpu/nvgpu/gk20a/gr_pri_gk20a.h        |  10 -
 drivers/gpu/nvgpu/gk20a/mm_gk20a.c            |   1 -
 drivers/gpu/nvgpu/gm20b/gr_gm20b.c            |  76 ----
 drivers/gpu/nvgpu/gm20b/gr_gm20b.h            |   8 -
 drivers/gpu/nvgpu/gm20b/hal_gm20b.c           |  12 +-
 drivers/gpu/nvgpu/gm20b/ltc_gm20b.c           | 489 ----------------------
 drivers/gpu/nvgpu/gm20b/ltc_gm20b.h           |  49 ---
 drivers/gpu/nvgpu/gp106/hal_gp106.c           |  13 +-
 drivers/gpu/nvgpu/gp10b/hal_gp10b.c           |  13 +-
 drivers/gpu/nvgpu/gp10b/ltc_gp10b.c           | 320 --------------
 drivers/gpu/nvgpu/gp10b/ltc_gp10b.h           |  35 --
 drivers/gpu/nvgpu/gv100/hal_gv100.c           |  15 +-
 drivers/gpu/nvgpu/gv11b/gr_gv11b.c            |  10 +-
 drivers/gpu/nvgpu/gv11b/hal_gv11b.c           |  15 +-
 drivers/gpu/nvgpu/gv11b/ltc_gv11b.c           | 207 ----------
 drivers/gpu/nvgpu/gv11b/ltc_gv11b.h           |  34 --
 drivers/gpu/nvgpu/vgpu/gp10b/vgpu_hal_gp10b.c |  13 +-
 drivers/gpu/nvgpu/vgpu/gv11b/vgpu_hal_gv11b.c |  15 +-
 30 files changed, 1366 insertions(+), 1356 deletions(-)
 delete mode 100644 drivers/gpu/nvgpu/common/ltc.c
 create mode 100644 drivers/gpu/nvgpu/common/ltc/ltc.c
 create mode 100644 drivers/gpu/nvgpu/common/ltc/ltc_gm20b.c
 create mode 100644 drivers/gpu/nvgpu/common/ltc/ltc_gm20b.h
 create mode 100644 drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c
 create mode 100644 drivers/gpu/nvgpu/common/ltc/ltc_gp10b.h
 create mode 100644 drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c
 create mode 100644 drivers/gpu/nvgpu/common/ltc/ltc_gv11b.h
 delete mode 100644 drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
 delete mode 100644 drivers/gpu/nvgpu/gm20b/ltc_gm20b.h
 delete mode 100644 drivers/gpu/nvgpu/gp10b/ltc_gp10b.c
 delete mode 100644 drivers/gpu/nvgpu/gp10b/ltc_gp10b.h
 delete mode 100644 drivers/gpu/nvgpu/gv11b/ltc_gv11b.c
 delete mode 100644 drivers/gpu/nvgpu/gv11b/ltc_gv11b.h

diff --git a/drivers/gpu/nvgpu/Makefile b/drivers/gpu/nvgpu/Makefile
index 7a21d294..9da20802 100644
--- a/drivers/gpu/nvgpu/Makefile
+++ b/drivers/gpu/nvgpu/Makefile
@@ -185,7 +185,10 @@ nvgpu-y += \
 	common/pmu/pmu_pg.o \
 	common/pmu/pmu_perfmon.o \
 	common/pmu/pmu_debug.o \
-	common/ltc.o \
+	common/ltc/ltc.o \
+	common/ltc/ltc_gm20b.o \
+	common/ltc/ltc_gp10b.o \
+	common/ltc/ltc_gv11b.o \
 	common/io_common.o \
 	common/clock_gating/gm20b_gating_reglist.o \
 	common/clock_gating/gp106_gating_reglist.o \
@@ -216,7 +219,6 @@ nvgpu-y += \
 	gk20a/fecs_trace_gk20a.o \
 	gk20a/mc_gk20a.o \
 	gm20b/hal_gm20b.o \
-	gm20b/ltc_gm20b.o \
 	gm20b/gr_gm20b.o \
 	gm20b/clk_gm20b.o \
 	gm20b/fifo_gm20b.o \
@@ -267,7 +269,6 @@ nvgpu-y += \
 	gp10b/ce_gp10b.o \
 	gp10b/mc_gp10b.o \
 	gp10b/fifo_gp10b.o \
-	gp10b/ltc_gp10b.o
\ gp10b/mm_gp10b.o \ gp10b/pmu_gp10b.o \ gp10b/hal_gp10b.o \ @@ -293,7 +294,6 @@ nvgpu-y += \ gv11b/css_gr_gv11b.o \ gv11b/dbg_gpu_gv11b.o \ gv11b/mc_gv11b.o \ - gv11b/ltc_gv11b.o \ gv11b/hal_gv11b.o \ gv11b/gr_gv11b.o \ gv11b/fifo_gv11b.o \ diff --git a/drivers/gpu/nvgpu/Makefile.sources b/drivers/gpu/nvgpu/Makefile.sources index e4080013..2d7efd98 100644 --- a/drivers/gpu/nvgpu/Makefile.sources +++ b/drivers/gpu/nvgpu/Makefile.sources @@ -55,7 +55,10 @@ srcs := common/mm/nvgpu_allocator.c \ common/semaphore.c \ common/as.c \ common/rbtree.c \ - common/ltc.c \ + common/ltc/ltc.c \ + common/ltc/ltc_gm20b.c \ + common/ltc/ltc_gp10b.c \ + common/ltc/ltc_gv11b.c \ common/io_common.c \ common/ecc.c \ common/ce2.c \ @@ -149,7 +152,6 @@ srcs := common/mm/nvgpu_allocator.c \ gk20a/tsg_gk20a.c \ gk20a/mc_gk20a.c \ gm20b/hal_gm20b.c \ - gm20b/ltc_gm20b.c \ gm20b/gr_gm20b.c \ gm20b/clk_gm20b.c \ gm20b/fifo_gm20b.c \ @@ -164,7 +166,6 @@ srcs := common/mm/nvgpu_allocator.c \ gp10b/ce_gp10b.c \ gp10b/mc_gp10b.c \ gp10b/fifo_gp10b.c \ - gp10b/ltc_gp10b.c \ gp10b/mm_gp10b.c \ gp10b/pmu_gp10b.c \ gp10b/hal_gp10b.c \ @@ -176,7 +177,6 @@ srcs := common/mm/nvgpu_allocator.c \ gv11b/gv11b.c \ gv11b/dbg_gpu_gv11b.c \ gv11b/mc_gv11b.c \ - gv11b/ltc_gv11b.c \ gv11b/hal_gv11b.c \ gv11b/gr_gv11b.c \ gv11b/fifo_gv11b.c \ diff --git a/drivers/gpu/nvgpu/common/ltc.c b/drivers/gpu/nvgpu/common/ltc.c deleted file mode 100644 index 1beb1974..00000000 --- a/drivers/gpu/nvgpu/common/ltc.c +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include -#include -#include - -#include "gk20a/gk20a.h" -#include "gk20a/gr_gk20a.h" - -int nvgpu_init_ltc_support(struct gk20a *g) -{ - nvgpu_spinlock_init(&g->ltc_enabled_lock); - - g->mm.ltc_enabled_current = true; - g->mm.ltc_enabled_target = true; - - if (g->ops.ltc.init_fs_state) - g->ops.ltc.init_fs_state(g); - - return 0; -} - -void nvgpu_ltc_sync_enabled(struct gk20a *g) -{ - if (!g->ops.ltc.set_enabled) - return; - - nvgpu_spinlock_acquire(&g->ltc_enabled_lock); - if (g->mm.ltc_enabled_current != g->mm.ltc_enabled_target) { - g->ops.ltc.set_enabled(g, g->mm.ltc_enabled_target); - g->mm.ltc_enabled_current = g->mm.ltc_enabled_target; - } - nvgpu_spinlock_release(&g->ltc_enabled_lock); -} diff --git a/drivers/gpu/nvgpu/common/ltc/ltc.c b/drivers/gpu/nvgpu/common/ltc/ltc.c new file mode 100644 index 00000000..1beb1974 --- /dev/null +++ b/drivers/gpu/nvgpu/common/ltc/ltc.c @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include + +#include "gk20a/gk20a.h" +#include "gk20a/gr_gk20a.h" + +int nvgpu_init_ltc_support(struct gk20a *g) +{ + nvgpu_spinlock_init(&g->ltc_enabled_lock); + + g->mm.ltc_enabled_current = true; + g->mm.ltc_enabled_target = true; + + if (g->ops.ltc.init_fs_state) + g->ops.ltc.init_fs_state(g); + + return 0; +} + +void nvgpu_ltc_sync_enabled(struct gk20a *g) +{ + if (!g->ops.ltc.set_enabled) + return; + + nvgpu_spinlock_acquire(&g->ltc_enabled_lock); + if (g->mm.ltc_enabled_current != g->mm.ltc_enabled_target) { + g->ops.ltc.set_enabled(g, g->mm.ltc_enabled_target); + g->mm.ltc_enabled_current = g->mm.ltc_enabled_target; + } + nvgpu_spinlock_release(&g->ltc_enabled_lock); +} diff --git a/drivers/gpu/nvgpu/common/ltc/ltc_gm20b.c b/drivers/gpu/nvgpu/common/ltc/ltc_gm20b.c new file mode 100644 index 00000000..28d63e82 --- /dev/null +++ b/drivers/gpu/nvgpu/common/ltc/ltc_gm20b.c @@ -0,0 +1,572 @@ +/* + * GM20B L2 + * + * Copyright (c) 2014-2018 NVIDIA CORPORATION. All rights reserved. 
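The enable-sync helper in common/ltc/ltc.c above touches hardware only under the lock and only when the cached state and the requested state differ; callers may flip ltc_enabled_target at any time and sync later. A minimal stand-alone sketch of that pattern, with a pthread mutex and a fake_gpu struct standing in for nvgpu_spinlock and struct gk20a (all names here are illustrative, not nvgpu API):

/* Minimal model of the ltc_enabled_current/ltc_enabled_target sync in
 * common/ltc/ltc.c. pthread_mutex_t stands in for nvgpu_spinlock; the
 * set_enabled callback stands in for g->ops.ltc.set_enabled. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_gpu {
	pthread_mutex_t lock;
	bool ltc_enabled_current;	/* what the HW is set to */
	bool ltc_enabled_target;	/* what callers last asked for */
	void (*set_enabled)(struct fake_gpu *g, bool enabled);
};

static void hw_set_enabled(struct fake_gpu *g, bool enabled)
{
	(void)g;
	printf("HW: L2 caching %s\n", enabled ? "enabled" : "bypassed");
}

/* Mirrors nvgpu_ltc_sync_enabled(): touch HW only on a state change. */
static void sync_enabled(struct fake_gpu *g)
{
	if (!g->set_enabled)
		return;

	pthread_mutex_lock(&g->lock);
	if (g->ltc_enabled_current != g->ltc_enabled_target) {
		g->set_enabled(g, g->ltc_enabled_target);
		g->ltc_enabled_current = g->ltc_enabled_target;
	}
	pthread_mutex_unlock(&g->lock);
}

int main(void)
{
	struct fake_gpu g = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.ltc_enabled_current = true,
		.ltc_enabled_target = true,
		.set_enabled = hw_set_enabled,
	};

	sync_enabled(&g);		/* no change: HW untouched */
	g.ltc_enabled_target = false;	/* someone requests bypass */
	sync_enabled(&g);		/* exactly one HW write */
	return 0;
}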
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "gk20a/gk20a.h" + +#include "ltc_gm20b.h" + +int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr) +{ + /* max memory size (MB) to cover */ + u32 max_size = gr->max_comptag_mem; + /* one tag line covers 128KB */ + u32 max_comptag_lines = max_size << 3U; + + u32 hw_max_comptag_lines = + ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_init_v(); + + u32 cbc_param = + gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r()); + u32 comptags_per_cacheline = + ltc_ltcs_ltss_cbc_param_comptags_per_cache_line_v(cbc_param); + + u32 compbit_backing_size; + + int err; + + nvgpu_log_fn(g, " "); + + if (max_comptag_lines == 0U) + return 0; + + if (max_comptag_lines > hw_max_comptag_lines) + max_comptag_lines = hw_max_comptag_lines; + + compbit_backing_size = + DIV_ROUND_UP(max_comptag_lines, comptags_per_cacheline) * + gr->cacheline_size * gr->slices_per_ltc * g->ltc_count; + + /* aligned to 2KB * ltc_count */ + compbit_backing_size += + g->ltc_count << ltc_ltcs_ltss_cbc_base_alignment_shift_v(); + + /* must be a multiple of 64KB */ + compbit_backing_size = roundup(compbit_backing_size, 64*1024); + + max_comptag_lines = + (compbit_backing_size * comptags_per_cacheline) / + (gr->cacheline_size * gr->slices_per_ltc * g->ltc_count); + + if (max_comptag_lines > hw_max_comptag_lines) + max_comptag_lines = hw_max_comptag_lines; + + nvgpu_log_info(g, "compbit backing store size : %d", + compbit_backing_size); + nvgpu_log_info(g, "max comptag lines : %d", + max_comptag_lines); + + err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size); + if (err) + return err; + + err = gk20a_comptag_allocator_init(g, &gr->comp_tags, max_comptag_lines); + if (err) + return err; + + gr->max_comptag_lines = max_comptag_lines; + gr->comptags_per_cacheline = comptags_per_cacheline; + + return 0; +} + +int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op, + u32 min, u32 max) +{ + struct gr_gk20a *gr = &g->gr; + struct nvgpu_timeout timeout; + int err = 0; + u32 ltc, slice, ctrl1, val, hw_op = 0U; + u32 slices_per_ltc = ltc_ltcs_ltss_cbc_param_slices_per_ltc_v( + gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r())); + u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE); + u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE); + const u32 max_lines = 16384U; + + nvgpu_log_fn(g, " "); + + 
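To make the gm20b_ltc_init_comptags() arithmetic above concrete: one comptag line covers 128 KB, so the requested megabytes are shifted left by 3; the backing store is sized from comptags-per-cacheline, padded by 2 KB per LTC for alignment, and rounded up to a 64 KB multiple, after which the line count is recomputed from the (possibly larger) store. A stand-alone worked example; the parameter values below are assumed for illustration (the real ones come from ltc_ltcs_ltss_cbc_param_r()):

/* Worked example of the gm20b comptag sizing above, with invented
 * parameters: 4 comptags per cacheline, 512 B cacheline, 2 slices per
 * LTC, 2 LTCs, 4096 MB of memory to cover. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define ROUNDUP(x, a)		(DIV_ROUND_UP(x, a) * (a))

int main(void)
{
	unsigned int max_size_mb = 4096;	/* memory to cover */
	unsigned int comptags_per_cacheline = 4;
	unsigned int cacheline_size = 512;
	unsigned int slices_per_ltc = 2;
	unsigned int ltc_count = 2;

	/* one tag line covers 128 KB, so 8 lines per MB */
	unsigned int lines = max_size_mb << 3;

	unsigned int backing =
		DIV_ROUND_UP(lines, comptags_per_cacheline) *
		cacheline_size * slices_per_ltc * ltc_count;

	backing += ltc_count << 11;		/* 2 KB alignment per LTC */
	backing = ROUNDUP(backing, 64 * 1024);	/* 64 KB multiple */

	/* the rounded-up store may now cover more lines than asked */
	lines = (backing * comptags_per_cacheline) /
		(cacheline_size * slices_per_ltc * ltc_count);

	/* prints: backing store: 16842752 bytes, comptag lines: 32896 */
	printf("backing store: %u bytes, comptag lines: %u\n",
	       backing, lines);
	return 0;
}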
trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max); + + if (gr->compbit_store.mem.size == 0) + return 0; + + while (1) { + const u32 iter_max = min(min + max_lines - 1, max); + bool full_cache_op = true; + + nvgpu_mutex_acquire(&g->mm.l2_op_lock); + + nvgpu_log_info(g, "clearing CBC lines %u..%u", min, iter_max); + + if (op == gk20a_cbc_op_clear) { + gk20a_writel( + g, ltc_ltcs_ltss_cbc_ctrl2_r(), + ltc_ltcs_ltss_cbc_ctrl2_clear_lower_bound_f( + min)); + gk20a_writel( + g, ltc_ltcs_ltss_cbc_ctrl3_r(), + ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_f( + iter_max)); + hw_op = ltc_ltcs_ltss_cbc_ctrl1_clear_active_f(); + full_cache_op = false; + } else if (op == gk20a_cbc_op_clean) { + /* this is full-cache op */ + hw_op = ltc_ltcs_ltss_cbc_ctrl1_clean_active_f(); + } else if (op == gk20a_cbc_op_invalidate) { + /* this is full-cache op */ + hw_op = ltc_ltcs_ltss_cbc_ctrl1_invalidate_active_f(); + } else { + nvgpu_err(g, "Unknown op: %u", (unsigned)op); + err = -EINVAL; + goto out; + } + gk20a_writel(g, ltc_ltcs_ltss_cbc_ctrl1_r(), + gk20a_readl(g, + ltc_ltcs_ltss_cbc_ctrl1_r()) | hw_op); + + for (ltc = 0; ltc < g->ltc_count; ltc++) { + for (slice = 0; slice < slices_per_ltc; slice++) { + + ctrl1 = ltc_ltc0_lts0_cbc_ctrl1_r() + + ltc * ltc_stride + slice * lts_stride; + + nvgpu_timeout_init(g, &timeout, 2000, + NVGPU_TIMER_RETRY_TIMER); + do { + val = gk20a_readl(g, ctrl1); + if (!(val & hw_op)) + break; + nvgpu_udelay(5); + } while (!nvgpu_timeout_expired(&timeout)); + + if (nvgpu_timeout_peek_expired(&timeout)) { + nvgpu_err(g, "comp tag clear timeout"); + err = -EBUSY; + goto out; + } + } + } + + /* are we done? */ + if (full_cache_op || iter_max == max) + break; + + /* note: iter_max is inclusive upper bound */ + min = iter_max + 1; + + /* give a chance for higher-priority threads to progress */ + nvgpu_mutex_release(&g->mm.l2_op_lock); + } +out: + trace_gk20a_ltc_cbc_ctrl_done(g->name); + nvgpu_mutex_release(&g->mm.l2_op_lock); + return err; +} + +void gm20b_ltc_init_fs_state(struct gk20a *g) +{ + struct gr_gk20a *gr = &g->gr; + u32 reg; + + nvgpu_log_info(g, "initialize gm20b l2"); + + g->max_ltc_count = gk20a_readl(g, top_num_ltcs_r()); + g->ltc_count = gk20a_readl(g, pri_ringmaster_enum_ltc_r()); + nvgpu_log_info(g, "%d ltcs out of %d", g->ltc_count, g->max_ltc_count); + + reg = gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r()); + gr->slices_per_ltc = ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(reg);; + gr->cacheline_size = + 512U << ltc_ltcs_ltss_cbc_param_cache_line_size_v(reg); + + gk20a_writel(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r(), + g->ltc_count); + gk20a_writel(g, ltc_ltcs_misc_ltc_num_active_ltcs_r(), + g->ltc_count); + + gk20a_writel(g, ltc_ltcs_ltss_dstg_cfg0_r(), + gk20a_readl(g, ltc_ltc0_lts0_dstg_cfg0_r()) | + ltc_ltcs_ltss_dstg_cfg0_vdc_4to2_disable_m()); + + /* Disable LTC interrupts */ + reg = gk20a_readl(g, ltc_ltcs_ltss_intr_r()); + reg &= ~ltc_ltcs_ltss_intr_en_evicted_cb_m(); + reg &= ~ltc_ltcs_ltss_intr_en_illegal_compstat_access_m(); + reg &= ~ltc_ltcs_ltss_intr_en_illegal_compstat_m(); + gk20a_writel(g, ltc_ltcs_ltss_intr_r(), reg); +} + +void gm20b_ltc_isr(struct gk20a *g) +{ + u32 mc_intr, ltc_intr; + unsigned int ltc, slice; + u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE); + u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE); + + mc_intr = gk20a_readl(g, mc_intr_ltc_r()); + nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr); + for (ltc = 0; ltc < g->ltc_count; ltc++) { + if ((mc_intr & 1U << ltc) == 0) + continue; + for (slice = 0; slice < 
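gm20b_ltc_cbc_ctrl() above caps each hardware clear at 16384 comptag lines and releases mm.l2_op_lock between chunks, so higher-priority waiters can take the lock mid-operation. A stand-alone sketch of that chunked-range pattern (the mutex and clear_lines() are stand-ins for the real lock and the CBC_CTRL register sequence):

/* Chunked-range pattern from gm20b_ltc_cbc_ctrl(): process [min, max]
 * inclusive, at most CHUNK lines per lock hold. */
#include <pthread.h>
#include <stdio.h>

#define CHUNK 16384u

static pthread_mutex_t l2_op_lock = PTHREAD_MUTEX_INITIALIZER;

static void clear_lines(unsigned int lo, unsigned int hi)
{
	printf("clearing CBC lines %u..%u\n", lo, hi);
}

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static void cbc_clear(unsigned int min, unsigned int max)
{
	while (1) {
		/* iter_max is an inclusive upper bound */
		const unsigned int iter_max = min_u32(min + CHUNK - 1, max);

		pthread_mutex_lock(&l2_op_lock);
		clear_lines(min, iter_max);
		pthread_mutex_unlock(&l2_op_lock);

		if (iter_max == max)
			break;
		/* lock dropped here: other waiters may run */
		min = iter_max + 1;
	}
}

int main(void)
{
	cbc_clear(0, 40000);	/* chunks: 0..16383, ..32767, ..40000 */
	return 0;
}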
g->gr.slices_per_ltc; slice++) { + ltc_intr = gk20a_readl(g, ltc_ltc0_lts0_intr_r() + + ltc_stride * ltc + + lts_stride * slice); + nvgpu_err(g, "ltc%d, slice %d: %08x", + ltc, slice, ltc_intr); + gk20a_writel(g, ltc_ltc0_lts0_intr_r() + + ltc_stride * ltc + + lts_stride * slice, + ltc_intr); + } + } +} + +u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base) +{ + u32 val = gk20a_readl(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r()); + if (val == 2U) { + return base * 2; + } else if (val != 1) { + nvgpu_err(g, "Invalid number of active ltcs: %08x", val); + } + + return base; +} + +/* + * Performs a full flush of the L2 cache. + */ +void gm20b_flush_ltc(struct gk20a *g) +{ + struct nvgpu_timeout timeout; + unsigned int ltc; + u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE); + + /* Clean... */ + nvgpu_writel_check(g, ltc_ltcs_ltss_tstg_cmgmt1_r(), + ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_f() | + ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_f() | + ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_f() | + ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_f() | + ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_f() | + ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_f()); + + /* Wait on each LTC individually. */ + for (ltc = 0; ltc < g->ltc_count; ltc++) { + u32 op_pending; + + /* + * Use 5ms - this should be sufficient time to flush the cache. + * On tegra, rough EMC BW available for old tegra chips (newer + * chips are strictly faster) can be estimated as follows: + * + * Lowest reasonable EMC clock speed will be around 102MHz on + * t124 for display enabled boards and generally fixed to max + * for non-display boards (since they are generally plugged in). + * + * Thus, the available BW is 64b * 2 * 102MHz = 1.3GB/s. Of that + * BW the GPU will likely get about half (display and overhead/ + * utilization inefficiency eating the rest) so 650MB/s at + * worst. Assuming at most 1MB of GPU L2 cache (less for most + * chips) worst case is we take 1MB/650MB/s = 1.5ms. + * + * So 5ms timeout here should be more than sufficient. + */ + nvgpu_timeout_init(g, &timeout, 5, NVGPU_TIMER_CPU_TIMER); + + do { + int cmgmt1 = ltc_ltc0_ltss_tstg_cmgmt1_r() + + ltc * ltc_stride; + op_pending = gk20a_readl(g, cmgmt1); + } while ((op_pending & + ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_f()) && + !nvgpu_timeout_expired_msg(&timeout, + "L2 flush timeout!")); + } + + /* And invalidate. */ + nvgpu_writel_check(g, ltc_ltcs_ltss_tstg_cmgmt0_r(), + ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_f() | + ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_f() | + ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_f() | + ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_f() | + ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_f()); + + /* Wait on each LTC individually. */ + for (ltc = 0; ltc < g->ltc_count; ltc++) { + u32 op_pending; + + /* Again, 5ms. 
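The flush path above waits on each LTC with a short deadline rather than spinning forever; the long comment in gm20b_flush_ltc() derives from worst-case EMC bandwidth why 5 ms is ample. A stand-alone sketch of that poll-until-idle-or-timeout idiom using CLOCK_MONOTONIC (read_pending() is a stub for the cmgmt register read, not an nvgpu call):

/* Poll-until-idle-or-timeout, as used for the per-LTC clean and
 * invalidate waits above. */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static int fake_hw_countdown = 3;

static bool read_pending(void)
{
	return --fake_hw_countdown > 0;	/* goes idle after a few polls */
}

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* Returns 0 once idle, -1 if still pending after timeout_ms. */
static int wait_idle(long long timeout_ms)
{
	const long long deadline = now_ms() + timeout_ms;

	while (read_pending()) {
		if (now_ms() > deadline) {
			fprintf(stderr, "L2 flush timeout!\n");
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	return wait_idle(5) ? 1 : 0;	/* 5 ms, as in the flush above */
}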
*/ + nvgpu_timeout_init(g, &timeout, 5, NVGPU_TIMER_CPU_TIMER); + + do { + int cmgmt0 = ltc_ltc0_ltss_tstg_cmgmt0_r() + + ltc * ltc_stride; + op_pending = gk20a_readl(g, cmgmt0); + } while ((op_pending & + ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_f()) && + !nvgpu_timeout_expired_msg(&timeout, + "L2 flush timeout!")); + } +} + +int gm20b_determine_L2_size_bytes(struct gk20a *g) +{ + u32 lts_per_ltc; + u32 ways; + u32 sets; + u32 bytes_per_line; + u32 active_ltcs; + u32 cache_size; + + u32 tmp; + u32 active_sets_value; + + tmp = gk20a_readl(g, ltc_ltc0_lts0_tstg_cfg1_r()); + ways = hweight32(ltc_ltc0_lts0_tstg_cfg1_active_ways_v(tmp)); + + active_sets_value = ltc_ltc0_lts0_tstg_cfg1_active_sets_v(tmp); + if (active_sets_value == ltc_ltc0_lts0_tstg_cfg1_active_sets_all_v()) { + sets = 64U; + } else if (active_sets_value == + ltc_ltc0_lts0_tstg_cfg1_active_sets_half_v()) { + sets = 32U; + } else if (active_sets_value == + ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v()) { + sets = 16U; + } else { + nvgpu_err(g, "Unknown constant %u for active sets", + (unsigned)active_sets_value); + sets = 0U; + } + + active_ltcs = g->gr.num_fbps; + + /* chip-specific values */ + lts_per_ltc = 2U; + bytes_per_line = 128U; + cache_size = active_ltcs * lts_per_ltc * ways * sets * bytes_per_line; + + return cache_size; +} + +/* + * Sets the ZBC color for the passed index. + */ +void gm20b_ltc_set_zbc_color_entry(struct gk20a *g, + struct zbc_entry *color_val, + u32 index) +{ + u32 i; + u32 real_index = index + GK20A_STARTOF_ZBC_TABLE; + + nvgpu_writel_check(g, ltc_ltcs_ltss_dstg_zbc_index_r(), + ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index)); + + for (i = 0; + i < ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(); i++) { + nvgpu_writel_check(g, + ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(i), + color_val->color_l2[i]); + } +} + +/* + * Sets the ZBC depth for the passed index. + */ +void gm20b_ltc_set_zbc_depth_entry(struct gk20a *g, + struct zbc_entry *depth_val, + u32 index) +{ + u32 real_index = index + GK20A_STARTOF_ZBC_TABLE; + + nvgpu_writel_check(g, ltc_ltcs_ltss_dstg_zbc_index_r(), + ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index)); + + nvgpu_writel_check(g, + ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(), + depth_val->depth); +} + +void gm20b_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr) +{ + u32 max_size = gr->max_comptag_mem; + u32 max_comptag_lines = max_size << 3U; + + u32 compbit_base_post_divide; + u64 compbit_base_post_multiply64; + u64 compbit_store_iova; + u64 compbit_base_post_divide64; + + if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) + compbit_store_iova = nvgpu_mem_get_phys_addr(g, + &gr->compbit_store.mem); + else + compbit_store_iova = nvgpu_mem_get_addr(g, + &gr->compbit_store.mem); + + compbit_base_post_divide64 = compbit_store_iova >> + ltc_ltcs_ltss_cbc_base_alignment_shift_v(); + + do_div(compbit_base_post_divide64, g->ltc_count); + compbit_base_post_divide = u64_lo32(compbit_base_post_divide64); + + compbit_base_post_multiply64 = ((u64)compbit_base_post_divide * + g->ltc_count) << ltc_ltcs_ltss_cbc_base_alignment_shift_v(); + + if (compbit_base_post_multiply64 < compbit_store_iova) + compbit_base_post_divide++; + + /* Bug 1477079 indicates sw adjustment on the posted divided base. 
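gm20b_determine_L2_size_bytes() above multiplies the active LTC count by the chip constants (2 slices per LTC, 128-byte lines) and by the ways and sets decoded from tstg_cfg1, where ways is a popcount of the active-ways mask. A worked stand-alone example; the raw register fields below are invented for illustration:

/* Worked example of gm20b_determine_L2_size_bytes() above. */
#include <stdio.h>

static unsigned int hweight32(unsigned int x)
{
	unsigned int n = 0;

	while (x) {
		n += x & 1u;
		x >>= 1;
	}
	return n;
}

int main(void)
{
	unsigned int active_ways_mask = 0xffff;	/* 16 ways enabled */
	unsigned int sets = 64;			/* "all sets" case */
	unsigned int active_ltcs = 2;		/* e.g. num_fbps */
	unsigned int lts_per_ltc = 2;		/* gm20b constant */
	unsigned int bytes_per_line = 128;	/* gm20b constant */

	unsigned int ways = hweight32(active_ways_mask);
	unsigned int cache_size = active_ltcs * lts_per_ltc *
				  ways * sets * bytes_per_line;

	/* 2 * 2 * 16 * 64 * 128 = 524288 bytes (512 KiB) */
	printf("L2 size: %u bytes\n", cache_size);
	return 0;
}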
*/ + if (g->ops.ltc.cbc_fix_config) + compbit_base_post_divide = + g->ops.ltc.cbc_fix_config(g, compbit_base_post_divide); + + gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(), + compbit_base_post_divide); + + nvgpu_log(g, gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte, + "compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n", + (u32)(compbit_store_iova >> 32), + (u32)(compbit_store_iova & 0xffffffff), + compbit_base_post_divide); + + gr->compbit_store.base_hw = compbit_base_post_divide; + + g->ops.ltc.cbc_ctrl(g, gk20a_cbc_op_invalidate, + 0, max_comptag_lines - 1); + +} + +void gm20b_ltc_set_enabled(struct gk20a *g, bool enabled) +{ + u32 reg_f = ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f(); + u32 reg = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r()); + + if (enabled) + /* bypass disabled (normal caching ops)*/ + reg &= ~reg_f; + else + /* bypass enabled (no caching) */ + reg |= reg_f; + + gk20a_writel(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r(), reg); +} + +/* + * LTC pri addressing + */ +bool gm20b_ltc_pri_is_ltc_addr(struct gk20a *g, u32 addr) +{ + return ((addr >= ltc_pltcg_base_v()) && (addr < ltc_pltcg_extent_v())); +} + +bool gm20b_ltc_is_ltcs_ltss_addr(struct gk20a *g, u32 addr) +{ + u32 ltc_shared_base = ltc_ltcs_ltss_v(); + u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE); + + return (addr >= ltc_shared_base) && + (addr < (ltc_shared_base + lts_stride)); +} + +bool gm20b_ltc_is_ltcn_ltss_addr(struct gk20a *g, u32 addr) +{ + u32 lts_shared_base = ltc_ltc0_ltss_v(); + u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE); + u32 addr_mask = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE) - 1; + u32 base_offset = lts_shared_base & addr_mask; + u32 end_offset = base_offset + lts_stride; + + return (!gm20b_ltc_is_ltcs_ltss_addr(g, addr)) && + ((addr & addr_mask) >= base_offset) && + ((addr & addr_mask) < end_offset); +} + +static void gm20b_ltc_update_ltc_lts_addr(struct gk20a *g, u32 addr, u32 ltc_num, + u32 *priv_addr_table, + u32 *priv_addr_table_index) +{ + u32 num_ltc_slices = g->ops.gr.get_max_lts_per_ltc(g); + u32 index = *priv_addr_table_index; + u32 lts_num; + u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE); + u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE); + + for (lts_num = 0; lts_num < num_ltc_slices; lts_num++) { + priv_addr_table[index++] = ltc_ltc0_lts0_v() + + ltc_num * ltc_stride + + lts_num * lts_stride + + (addr & (lts_stride - 1)); + } + + *priv_addr_table_index = index; +} + +void gm20b_ltc_split_lts_broadcast_addr(struct gk20a *g, u32 addr, + u32 *priv_addr_table, + u32 *priv_addr_table_index) +{ + u32 num_ltc = g->ltc_count; + u32 i, start, ltc_num = 0; + u32 pltcg_base = ltc_pltcg_base_v(); + u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE); + + for (i = 0; i < num_ltc; i++) { + start = pltcg_base + i * ltc_stride; + if ((addr >= start) && (addr < (start + ltc_stride))) { + ltc_num = i; + break; + } + } + gm20b_ltc_update_ltc_lts_addr(g, addr, ltc_num, priv_addr_table, + priv_addr_table_index); +} + +void gm20b_ltc_split_ltc_broadcast_addr(struct gk20a *g, u32 addr, + u32 *priv_addr_table, + u32 *priv_addr_table_index) +{ + u32 num_ltc = g->ltc_count; + u32 ltc_num; + + for (ltc_num = 0; ltc_num < num_ltc; ltc_num++) { + gm20b_ltc_update_ltc_lts_addr(g, addr, ltc_num, + priv_addr_table, priv_addr_table_index); + } +} diff --git a/drivers/gpu/nvgpu/common/ltc/ltc_gm20b.h b/drivers/gpu/nvgpu/common/ltc/ltc_gm20b.h new file mode 100644 index 00000000..cc92c70a --- /dev/null +++ 
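The split helpers above rewrite a single broadcast priv address into one unicast address per LTC/LTS pair, preserving the register offset within the slice stride. A stand-alone sketch of that expansion; the base address and strides are placeholders, not the real gm20b litter values:

/* Sketch of gm20b_ltc_update_ltc_lts_addr() and
 * gm20b_ltc_split_ltc_broadcast_addr() above. */
#include <stdio.h>

#define LTC0_LTS0_BASE	0x140400u	/* placeholder */
#define LTC_STRIDE	0x2000u		/* placeholder */
#define LTS_STRIDE	0x400u		/* placeholder */

static void update_ltc_lts_addr(unsigned int addr, unsigned int ltc_num,
				unsigned int slices_per_ltc,
				unsigned int *table, unsigned int *index)
{
	unsigned int lts;

	for (lts = 0; lts < slices_per_ltc; lts++)
		table[(*index)++] = LTC0_LTS0_BASE +
				    ltc_num * LTC_STRIDE +
				    lts * LTS_STRIDE +
				    /* keep the register offset in the slice */
				    (addr & (LTS_STRIDE - 1));
}

int main(void)
{
	unsigned int table[16], t = 0, i;
	unsigned int broadcast_addr = 0x17e260u;	/* placeholder reg */
	unsigned int num_ltc = 2, slices_per_ltc = 2;

	/* split_ltc_broadcast_addr: every LTC, every slice */
	for (i = 0; i < num_ltc; i++)
		update_ltc_lts_addr(broadcast_addr, i, slices_per_ltc,
				    table, &t);

	for (i = 0; i < t; i++)
		printf("unicast[%u] = 0x%x\n", i, table[i]);
	return 0;
}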
b/drivers/gpu/nvgpu/common/ltc/ltc_gm20b.h @@ -0,0 +1,66 @@ +/* + * GM20B L2 + * + * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NVGPU_LTC_GM20B +#define NVGPU_LTC_GM20B + +#include + +struct gk20a; +struct gr_gk20a; +struct gpu_ops; +struct zbc_entry; +enum gk20a_cbc_op; + +int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr); +int gm20b_determine_L2_size_bytes(struct gk20a *g); +void gm20b_ltc_set_zbc_color_entry(struct gk20a *g, + struct zbc_entry *color_val, + u32 index); +void gm20b_ltc_set_zbc_depth_entry(struct gk20a *g, + struct zbc_entry *depth_val, + u32 index); +void gm20b_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr); +void gm20b_ltc_set_enabled(struct gk20a *g, bool enabled); +void gm20b_ltc_init_fs_state(struct gk20a *g); +int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op, + u32 min, u32 max); +void gm20b_ltc_isr(struct gk20a *g); +u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base); +void gm20b_flush_ltc(struct gk20a *g); +int gm20b_ltc_alloc_phys_cbc(struct gk20a *g, + size_t compbit_backing_size); +int gm20b_ltc_alloc_virt_cbc(struct gk20a *g, + size_t compbit_backing_size); +bool gm20b_ltc_pri_is_ltc_addr(struct gk20a *g, u32 addr); +bool gm20b_ltc_is_ltcs_ltss_addr(struct gk20a *g, u32 addr); +bool gm20b_ltc_is_ltcn_ltss_addr(struct gk20a *g, u32 addr); +void gm20b_ltc_split_lts_broadcast_addr(struct gk20a *g, u32 addr, + u32 *priv_addr_table, + u32 *priv_addr_table_index); +void gm20b_ltc_split_ltc_broadcast_addr(struct gk20a *g, u32 addr, + u32 *priv_addr_table, + u32 *priv_addr_table_index); + +#endif diff --git a/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c b/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c new file mode 100644 index 00000000..eb262add --- /dev/null +++ b/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.c @@ -0,0 +1,320 @@ +/* + * GP10B L2 + * + * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include + +#include +#include +#include +#include + +#include +#include + +#include "gk20a/gk20a.h" + +#include "ltc_gm20b.h" +#include "ltc_gp10b.h" + +int gp10b_determine_L2_size_bytes(struct gk20a *g) +{ + u32 tmp; + int ret; + + nvgpu_log_fn(g, " "); + + tmp = gk20a_readl(g, ltc_ltc0_lts0_tstg_info_1_r()); + + ret = g->ltc_count * + ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(tmp)*1024 * + ltc_ltc0_lts0_tstg_info_1_slices_per_l2_v(tmp); + + nvgpu_log(g, gpu_dbg_info, "L2 size: %d\n", ret); + + nvgpu_log_fn(g, "done"); + + return ret; +} + +int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr) +{ + /* max memory size (MB) to cover */ + u32 max_size = gr->max_comptag_mem; + /* one tag line covers 64KB */ + u32 max_comptag_lines = max_size << 4U; + + u32 hw_max_comptag_lines = + ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_init_v(); + + u32 cbc_param = + gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r()); + u32 comptags_per_cacheline = + ltc_ltcs_ltss_cbc_param_comptags_per_cache_line_v(cbc_param); + u32 cbc_param2 = + gk20a_readl(g, ltc_ltcs_ltss_cbc_param2_r()); + u32 gobs_per_comptagline_per_slice = + ltc_ltcs_ltss_cbc_param2_gobs_per_comptagline_per_slice_v(cbc_param2); + + u32 compbit_backing_size; + + int err; + + nvgpu_log_fn(g, " "); + + if (max_comptag_lines == 0U) + return 0; + + /* Already initialized */ + if (gr->max_comptag_lines) + return 0; + + if (max_comptag_lines > hw_max_comptag_lines) + max_comptag_lines = hw_max_comptag_lines; + + compbit_backing_size = + roundup(max_comptag_lines * gobs_per_comptagline_per_slice, + gr->cacheline_size); + compbit_backing_size = roundup( + compbit_backing_size * gr->slices_per_ltc * g->ltc_count, + g->ops.fb.compressible_page_size(g)); + + /* aligned to 2KB * ltc_count */ + compbit_backing_size += + g->ltc_count << ltc_ltcs_ltss_cbc_base_alignment_shift_v(); + + /* must be a multiple of 64KB */ + compbit_backing_size = roundup(compbit_backing_size, 64*1024); + + nvgpu_log_info(g, "compbit backing store size : %d", + compbit_backing_size); + nvgpu_log_info(g, "max comptag lines : %d", + max_comptag_lines); + nvgpu_log_info(g, "gobs_per_comptagline_per_slice: %d", + gobs_per_comptagline_per_slice); + + err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size); + if (err) + return err; + + err = gk20a_comptag_allocator_init(g, &gr->comp_tags, max_comptag_lines); + if (err) + return err; + + gr->max_comptag_lines = max_comptag_lines; + gr->comptags_per_cacheline 
= comptags_per_cacheline; + gr->gobs_per_comptagline_per_slice = gobs_per_comptagline_per_slice; + + return 0; +} + +int gp10b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op, + u32 min, u32 max) +{ + struct gr_gk20a *gr = &g->gr; + struct nvgpu_timeout timeout; + int err = 0; + u32 ltc, slice, ctrl1, val, hw_op = 0U; + u32 slices_per_ltc = ltc_ltcs_ltss_cbc_param_slices_per_ltc_v( + gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r())); + u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE); + u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE); + const u32 max_lines = 16384U; + + nvgpu_log_fn(g, " "); + + trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max); + + if (gr->compbit_store.mem.size == 0U) + return 0; + + while (1) { + const u32 iter_max = min(min + max_lines - 1, max); + bool full_cache_op = true; + + nvgpu_mutex_acquire(&g->mm.l2_op_lock); + + nvgpu_log_info(g, "clearing CBC lines %u..%u", min, iter_max); + + if (op == gk20a_cbc_op_clear) { + nvgpu_writel_check( + g, ltc_ltcs_ltss_cbc_ctrl2_r(), + ltc_ltcs_ltss_cbc_ctrl2_clear_lower_bound_f( + min)); + + nvgpu_writel_check( + g, ltc_ltcs_ltss_cbc_ctrl3_r(), + ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_f( + iter_max)); + + hw_op = ltc_ltcs_ltss_cbc_ctrl1_clear_active_f(); + full_cache_op = false; + } else if (op == gk20a_cbc_op_clean) { + /* this is full-cache op */ + hw_op = ltc_ltcs_ltss_cbc_ctrl1_clean_active_f(); + } else if (op == gk20a_cbc_op_invalidate) { + /* this is full-cache op */ + hw_op = ltc_ltcs_ltss_cbc_ctrl1_invalidate_active_f(); + } else { + nvgpu_err(g, "Unknown op: %u", (unsigned)op); + err = -EINVAL; + goto out; + } + gk20a_writel(g, ltc_ltcs_ltss_cbc_ctrl1_r(), + gk20a_readl(g, + ltc_ltcs_ltss_cbc_ctrl1_r()) | hw_op); + + for (ltc = 0; ltc < g->ltc_count; ltc++) { + for (slice = 0; slice < slices_per_ltc; slice++) { + + ctrl1 = ltc_ltc0_lts0_cbc_ctrl1_r() + + ltc * ltc_stride + slice * lts_stride; + + nvgpu_timeout_init(g, &timeout, 2000, + NVGPU_TIMER_RETRY_TIMER); + do { + val = gk20a_readl(g, ctrl1); + if (!(val & hw_op)) + break; + nvgpu_udelay(5); + } while (!nvgpu_timeout_expired(&timeout)); + + if (nvgpu_timeout_peek_expired(&timeout)) { + nvgpu_err(g, "comp tag clear timeout"); + err = -EBUSY; + goto out; + } + } + } + + /* are we done? 
*/ + if (full_cache_op || iter_max == max) + break; + + /* note: iter_max is inclusive upper bound */ + min = iter_max + 1; + + /* give a chance for higher-priority threads to progress */ + nvgpu_mutex_release(&g->mm.l2_op_lock); + } +out: + trace_gk20a_ltc_cbc_ctrl_done(g->name); + nvgpu_mutex_release(&g->mm.l2_op_lock); + return err; +} + +void gp10b_ltc_isr(struct gk20a *g) +{ + u32 mc_intr, ltc_intr; + unsigned int ltc, slice; + u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE); + u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE); + + mc_intr = gk20a_readl(g, mc_intr_ltc_r()); + nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr); + for (ltc = 0; ltc < g->ltc_count; ltc++) { + if ((mc_intr & 1U << ltc) == 0) + continue; + for (slice = 0; slice < g->gr.slices_per_ltc; slice++) { + u32 offset = ltc_stride * ltc + lts_stride * slice; + ltc_intr = gk20a_readl(g, ltc_ltc0_lts0_intr_r() + offset); + + /* Detect and handle ECC errors */ + if (ltc_intr & + ltc_ltcs_ltss_intr_ecc_sec_error_pending_f()) { + u32 ecc_stats_reg_val; + + nvgpu_err(g, + "Single bit error detected in GPU L2!"); + + ecc_stats_reg_val = + gk20a_readl(g, + ltc_ltc0_lts0_dstg_ecc_report_r() + offset); + g->ecc.ltc.ecc_sec_count[ltc][slice].counter += + ltc_ltc0_lts0_dstg_ecc_report_sec_count_v(ecc_stats_reg_val); + ecc_stats_reg_val &= + ~(ltc_ltc0_lts0_dstg_ecc_report_sec_count_m()); + nvgpu_writel_check(g, + ltc_ltc0_lts0_dstg_ecc_report_r() + offset, + ecc_stats_reg_val); + g->ops.mm.l2_flush(g, true); + } + if (ltc_intr & + ltc_ltcs_ltss_intr_ecc_ded_error_pending_f()) { + u32 ecc_stats_reg_val; + + nvgpu_err(g, + "Double bit error detected in GPU L2!"); + + ecc_stats_reg_val = + gk20a_readl(g, + ltc_ltc0_lts0_dstg_ecc_report_r() + offset); + g->ecc.ltc.ecc_ded_count[ltc][slice].counter += + ltc_ltc0_lts0_dstg_ecc_report_ded_count_v(ecc_stats_reg_val); + ecc_stats_reg_val &= + ~(ltc_ltc0_lts0_dstg_ecc_report_ded_count_m()); + nvgpu_writel_check(g, + ltc_ltc0_lts0_dstg_ecc_report_r() + offset, + ecc_stats_reg_val); + } + + nvgpu_err(g, "ltc%d, slice %d: %08x", + ltc, slice, ltc_intr); + nvgpu_writel_check(g, ltc_ltc0_lts0_intr_r() + + ltc_stride * ltc + lts_stride * slice, + ltc_intr); + } + } +} + +void gp10b_ltc_init_fs_state(struct gk20a *g) +{ + u32 ltc_intr; + + gm20b_ltc_init_fs_state(g); + + gk20a_writel(g, ltc_ltca_g_axi_pctrl_r(), + ltc_ltca_g_axi_pctrl_user_sid_f(g->ltc_streamid)); + + /* Enable ECC interrupts */ + ltc_intr = gk20a_readl(g, ltc_ltcs_ltss_intr_r()); + ltc_intr |= ltc_ltcs_ltss_intr_en_ecc_sec_error_enabled_f() | + ltc_ltcs_ltss_intr_en_ecc_ded_error_enabled_f(); + gk20a_writel(g, ltc_ltcs_ltss_intr_r(), + ltc_intr); +} + +void gp10b_ltc_set_enabled(struct gk20a *g, bool enabled) +{ + u32 reg_f = ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f(); + u32 reg = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r()); + + if (enabled) + /* bypass disabled (normal caching ops)*/ + reg &= ~reg_f; + else + /* bypass enabled (no caching) */ + reg |= reg_f; + + nvgpu_writel_check(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r(), reg); +} diff --git a/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.h b/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.h new file mode 100644 index 00000000..c1a2bf64 --- /dev/null +++ b/drivers/gpu/nvgpu/common/ltc/ltc_gp10b.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 
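gp10b_ltc_isr() above accounts ECC errors by reading the dstg report register, adding the count field to a running per-slice counter, clearing just that field, and, for correctable (SEC) errors, flushing L2 so the corrected line is written back to memory. A minimal model of that read-accumulate-clear step; the field layout here is an assumption for the sketch, not the real dstg_ecc_report layout:

/* Model of the SEC-count accounting in gp10b_ltc_isr() above. */
#include <stdint.h>
#include <stdio.h>

#define SEC_COUNT_MASK	0x000000ffu	/* assumed field position */

static uint32_t fake_report_reg = 0x00000003;	/* 3 new SEC errors */
static uint64_t sec_counter;

static uint32_t reg_read(void)    { return fake_report_reg; }
static void reg_write(uint32_t v) { fake_report_reg = v; }

static void handle_sec_interrupt(void)
{
	uint32_t report = reg_read();

	sec_counter += report & SEC_COUNT_MASK;	/* accumulate delta */
	report &= ~SEC_COUNT_MASK;		/* reset the HW count */
	reg_write(report);

	/* gp10b also flushes L2 at this point
	 * (g->ops.mm.l2_flush(g, true)) so the corrected data
	 * reaches memory. */
}

int main(void)
{
	handle_sec_interrupt();
	printf("total SEC errors: %llu\n",
	       (unsigned long long)sec_counter);
	return 0;
}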
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LTC_GP10B_H +#define LTC_GP10B_H +struct gpu_ops; + +void gp10b_ltc_isr(struct gk20a *g); + +int gp10b_determine_L2_size_bytes(struct gk20a *g); +int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr); +void gp10b_ltc_init_fs_state(struct gk20a *g); +int gp10b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op, + u32 min, u32 max); +void gp10b_ltc_set_enabled(struct gk20a *g, bool enabled); +#endif diff --git a/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c b/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c new file mode 100644 index 00000000..98306079 --- /dev/null +++ b/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c @@ -0,0 +1,207 @@ +/* + * GV11B LTC + * + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include "gk20a/gk20a.h" + +#include "ltc_gp10b.h" +#include "ltc_gv11b.h" + +#include +#include +#include +#include + +#include + +/* + * Sets the ZBC stencil for the passed index. 
+ */ +void gv11b_ltc_set_zbc_stencil_entry(struct gk20a *g, + struct zbc_entry *stencil_val, + u32 index) +{ + u32 real_index = index + GK20A_STARTOF_ZBC_TABLE; + + nvgpu_writel_check(g, ltc_ltcs_ltss_dstg_zbc_index_r(), + ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index)); + + nvgpu_writel_check(g, + ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_r(), + stencil_val->depth); +} + +void gv11b_ltc_init_fs_state(struct gk20a *g) +{ + struct gr_gk20a *gr = &g->gr; + u32 ltc_intr; + u32 reg; + + nvgpu_log_info(g, "initialize gv11b l2"); + + g->max_ltc_count = gk20a_readl(g, top_num_ltcs_r()); + g->ltc_count = g->ops.priv_ring.enum_ltc(g); + nvgpu_log_info(g, "%u ltcs out of %u", g->ltc_count, g->max_ltc_count); + + reg = gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r()); + gr->slices_per_ltc = ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(reg);; + gr->cacheline_size = + 512U << ltc_ltcs_ltss_cbc_param_cache_line_size_v(reg); + + /* Disable LTC interrupts */ + reg = gk20a_readl(g, ltc_ltcs_ltss_intr_r()); + reg &= ~ltc_ltcs_ltss_intr_en_evicted_cb_m(); + reg &= ~ltc_ltcs_ltss_intr_en_illegal_compstat_access_m(); + nvgpu_writel_check(g, ltc_ltcs_ltss_intr_r(), reg); + + if (g->ops.ltc.intr_en_illegal_compstat) + g->ops.ltc.intr_en_illegal_compstat(g, + g->ltc_intr_en_illegal_compstat); + + /* Enable ECC interrupts */ + ltc_intr = gk20a_readl(g, ltc_ltcs_ltss_intr_r()); + ltc_intr |= ltc_ltcs_ltss_intr_en_ecc_sec_error_enabled_f() | + ltc_ltcs_ltss_intr_en_ecc_ded_error_enabled_f(); + nvgpu_writel_check(g, ltc_ltcs_ltss_intr_r(), + ltc_intr); +} + +void gv11b_ltc_intr_en_illegal_compstat(struct gk20a *g, bool enable) +{ + u32 val; + + /* disble/enble illegal_compstat interrupt */ + val = gk20a_readl(g, ltc_ltcs_ltss_intr_r()); + if (enable) + val = set_field(val, + ltc_ltcs_ltss_intr_en_illegal_compstat_m(), + ltc_ltcs_ltss_intr_en_illegal_compstat_enabled_f()); + else + val = set_field(val, + ltc_ltcs_ltss_intr_en_illegal_compstat_m(), + ltc_ltcs_ltss_intr_en_illegal_compstat_disabled_f()); + gk20a_writel(g, ltc_ltcs_ltss_intr_r(), val); +} + + +void gv11b_ltc_isr(struct gk20a *g) +{ + u32 mc_intr, ltc_intr3; + unsigned int ltc, slice; + u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE); + u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE); + u32 ecc_status, ecc_addr, corrected_cnt, uncorrected_cnt; + u32 corrected_delta, uncorrected_delta; + u32 corrected_overflow, uncorrected_overflow; + + mc_intr = gk20a_readl(g, mc_intr_ltc_r()); + for (ltc = 0; ltc < g->ltc_count; ltc++) { + if ((mc_intr & 1U << ltc) == 0) + continue; + + for (slice = 0; slice < g->gr.slices_per_ltc; slice++) { + u32 offset = ltc_stride * ltc + lts_stride * slice; + ltc_intr3 = gk20a_readl(g, ltc_ltc0_lts0_intr3_r() + + offset); + + /* Detect and handle ECC PARITY errors */ + + if (ltc_intr3 & + (ltc_ltcs_ltss_intr3_ecc_uncorrected_m() | + ltc_ltcs_ltss_intr3_ecc_corrected_m())) { + + ecc_status = gk20a_readl(g, + ltc_ltc0_lts0_l2_cache_ecc_status_r() + + offset); + ecc_addr = gk20a_readl(g, + ltc_ltc0_lts0_l2_cache_ecc_address_r() + + offset); + corrected_cnt = gk20a_readl(g, + ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r() + offset); + uncorrected_cnt = gk20a_readl(g, + ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r() + offset); + + corrected_delta = + ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_v(corrected_cnt); + uncorrected_delta = + ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_v(uncorrected_cnt); + corrected_overflow = ecc_status & + 
ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_total_counter_overflow_m(); + + uncorrected_overflow = ecc_status & + ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_total_counter_overflow_m(); + + /* clear the interrupt */ + if ((corrected_delta > 0U) || corrected_overflow) { + nvgpu_writel_check(g, + ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r() + offset, 0); + } + if ((uncorrected_delta > 0U) || uncorrected_overflow) { + nvgpu_writel_check(g, + ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r() + offset, 0); + } + + nvgpu_writel_check(g, + ltc_ltc0_lts0_l2_cache_ecc_status_r() + offset, + ltc_ltc0_lts0_l2_cache_ecc_status_reset_task_f()); + + /* update counters per slice */ + if (corrected_overflow) + corrected_delta += (0x1U << ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_s()); + if (uncorrected_overflow) + uncorrected_delta += (0x1U << ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_s()); + + g->ecc.ltc.ecc_sec_count[ltc][slice].counter += corrected_delta; + g->ecc.ltc.ecc_ded_count[ltc][slice].counter += uncorrected_delta; + nvgpu_log(g, gpu_dbg_intr, + "ltc:%d lts: %d cache ecc interrupt intr: 0x%x", ltc, slice, ltc_intr3); + + if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_rstg_m()) + nvgpu_log(g, gpu_dbg_intr, "rstg ecc error corrected"); + if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_rstg_m()) + nvgpu_log(g, gpu_dbg_intr, "rstg ecc error uncorrected"); + if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_tstg_m()) + nvgpu_log(g, gpu_dbg_intr, "tstg ecc error corrected"); + if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_tstg_m()) + nvgpu_log(g, gpu_dbg_intr, "tstg ecc error uncorrected"); + if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_dstg_m()) + nvgpu_log(g, gpu_dbg_intr, "dstg ecc error corrected"); + if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_dstg_m()) + nvgpu_log(g, gpu_dbg_intr, "dstg ecc error uncorrected"); + + if (corrected_overflow || uncorrected_overflow) + nvgpu_info(g, "ecc counter overflow!"); + + nvgpu_log(g, gpu_dbg_intr, + "ecc error address: 0x%x", ecc_addr); + + } + + } + + } + + /* fallback to other interrupts */ + gp10b_ltc_isr(g); +} diff --git a/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.h b/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.h new file mode 100644 index 00000000..9d33b9fb --- /dev/null +++ b/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
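The gv11b ISR above widens the narrow hardware error counters: the delta read from the count register is topped up by one full counter range whenever the matching overflow status bit is set, before being added to the 64-bit software counter. A sketch of that widening; the 16-bit counter width is assumed for illustration (the real width comes from the _total_s() accessor):

/* Overflow-aware counter widening from gv11b_ltc_isr() above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define COUNT_TOTAL_S	16u	/* assumed HW counter width in bits */

static uint64_t widen_delta(uint32_t hw_count, bool overflowed)
{
	uint64_t delta = hw_count;

	if (overflowed)
		delta += 1ull << COUNT_TOTAL_S;	/* counter wrapped once */
	return delta;
}

int main(void)
{
	/* counter read back as 5 after wrapping past 65535 */
	printf("delta = %llu\n",
	       (unsigned long long)widen_delta(5, true));	/* 65541 */
	return 0;
}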
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LTC_GV11B_H +#define LTC_GV11B_H +struct gk20a; + +void gv11b_ltc_set_zbc_stencil_entry(struct gk20a *g, + struct zbc_entry *stencil_val, + u32 index); +void gv11b_ltc_init_fs_state(struct gk20a *g); +void gv11b_ltc_intr_en_illegal_compstat(struct gk20a *g, bool enable); +void gv11b_ltc_isr(struct gk20a *g); + +#endif diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h index 5bb91f62..f802cd56 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/gk20a.h @@ -194,6 +194,15 @@ struct gpu_ops { u32 (*cbc_fix_config)(struct gk20a *g, int base); void (*flush)(struct gk20a *g); void (*intr_en_illegal_compstat)(struct gk20a *g, bool enable); + bool (*pri_is_ltc_addr)(struct gk20a *g, u32 addr); + bool (*is_ltcs_ltss_addr)(struct gk20a *g, u32 addr); + bool (*is_ltcn_ltss_addr)(struct gk20a *g, u32 addr); + void (*split_lts_broadcast_addr)(struct gk20a *g, u32 addr, + u32 *priv_addr_table, + u32 *priv_addr_table_index); + void (*split_ltc_broadcast_addr)(struct gk20a *g, u32 addr, + u32 *priv_addr_table, + u32 *priv_addr_table_index); } ltc; struct { void (*isr_stall)(struct gk20a *g, u32 inst_id, u32 pri_base); @@ -274,15 +283,6 @@ struct gpu_ops { u32 *gpc_num, u32 *tpc_num); u32 (*get_tpc_num)(struct gk20a *g, u32 addr); u32 (*get_egpc_base)(struct gk20a *g); - bool (*is_ltcs_ltss_addr)(struct gk20a *g, u32 addr); - bool (*is_ltcn_ltss_addr)(struct gk20a *g, u32 addr); - bool (*get_lts_in_ltc_shared_base)(void); - void (*split_lts_broadcast_addr)(struct gk20a *g, u32 addr, - u32 *priv_addr_table, - u32 *priv_addr_table_index); - void (*split_ltc_broadcast_addr)(struct gk20a *g, u32 addr, - u32 *priv_addr_table, - u32 *priv_addr_table_index); void (*detect_sm_arch)(struct gk20a *g); int (*add_zbc_color)(struct gk20a *g, struct gr_gk20a *gr, struct zbc_entry *color_val, u32 index); diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c index bb54e00e..fbba02ca 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c @@ -59,7 +59,6 @@ #include #include #include -#include #include #include @@ -6256,11 +6255,11 @@ int gr_gk20a_decode_priv_addr(struct gk20a *g, u32 addr, } *be_num = pri_get_be_num(g, addr); return 0; - } else if (pri_is_ltc_addr(addr)) { + } else if (g->ops.ltc.pri_is_ltc_addr(g, addr)) { *addr_type = CTXSW_ADDR_TYPE_LTCS; - if (g->ops.gr.is_ltcs_ltss_addr(g, addr)) + if (g->ops.ltc.is_ltcs_ltss_addr(g, addr)) *broadcast_flags |= PRI_BROADCAST_FLAGS_LTCS; - else if (g->ops.gr.is_ltcn_ltss_addr(g, addr)) + else if (g->ops.ltc.is_ltcn_ltss_addr(g, addr)) *broadcast_flags |= PRI_BROADCAST_FLAGS_LTSS; return 0; } else if (pri_is_fbpa_addr(g, addr)) { @@ -6398,10 +6397,10 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g, g->ops.gr.egpc_etpc_priv_addr_table(g, addr, gpc_num, tpc_num, broadcast_flags, priv_addr_table, &t); } else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTSS) { - g->ops.gr.split_lts_broadcast_addr(g, addr, + g->ops.ltc.split_lts_broadcast_addr(g, addr, priv_addr_table, &t); } else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTCS) { - g->ops.gr.split_ltc_broadcast_addr(g, addr, + g->ops.ltc.split_ltc_broadcast_addr(g, addr, priv_addr_table, &t); } else if (broadcast_flags & 
PRI_BROADCAST_FLAGS_FBPA) { g->ops.gr.split_fbpa_broadcast_addr(g, addr, diff --git a/drivers/gpu/nvgpu/gk20a/gr_pri_gk20a.h b/drivers/gpu/nvgpu/gk20a/gr_pri_gk20a.h index af390833..32a30d78 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_pri_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/gr_pri_gk20a.h @@ -29,8 +29,6 @@ * of the context state store for gr/compute contexts. */ -#include - /* * GPC pri addressing */ @@ -227,14 +225,6 @@ static inline u32 pri_ppc_addr(struct gk20a *g, u32 addr, u32 gpc, u32 ppc) ppc_in_gpc_base + (ppc * ppc_in_gpc_stride) + addr; } -/* - * LTC pri addressing - */ -static inline bool pri_is_ltc_addr(u32 addr) -{ - return ((addr >= ltc_pltcg_base_v()) && (addr < ltc_pltcg_extent_v())); -} - enum ctxsw_addr_type { CTXSW_ADDR_TYPE_SYS = 0, CTXSW_ADDR_TYPE_GPC = 1, diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c index 5bba5d9c..ee63489e 100644 --- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c @@ -52,7 +52,6 @@ #include #include #include -#include /* * GPU mapping life cycle diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c index 49b81783..abc39362 100644 --- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c @@ -39,7 +39,6 @@ #include #include #include -#include #include #include #include @@ -1438,81 +1437,6 @@ int gr_gm20b_get_preemption_mode_flags(struct gk20a *g, return 0; } -bool gr_gm20b_is_ltcs_ltss_addr(struct gk20a *g, u32 addr) -{ - u32 ltc_shared_base = ltc_ltcs_ltss_v(); - u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE); - - return (addr >= ltc_shared_base) && - (addr < (ltc_shared_base + lts_stride)); -} - -bool gr_gm20b_is_ltcn_ltss_addr(struct gk20a *g, u32 addr) -{ - u32 lts_shared_base = ltc_ltc0_ltss_v(); - u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE); - u32 addr_mask = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE) - 1; - u32 base_offset = lts_shared_base & addr_mask; - u32 end_offset = base_offset + lts_stride; - - return (!gr_gm20b_is_ltcs_ltss_addr(g, addr)) && - ((addr & addr_mask) >= base_offset) && - ((addr & addr_mask) < end_offset); -} - -static void gr_gm20b_update_ltc_lts_addr(struct gk20a *g, u32 addr, u32 ltc_num, - u32 *priv_addr_table, - u32 *priv_addr_table_index) -{ - u32 num_ltc_slices = g->ops.gr.get_max_lts_per_ltc(g); - u32 index = *priv_addr_table_index; - u32 lts_num; - u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE); - u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE); - - for (lts_num = 0; lts_num < num_ltc_slices; lts_num++) { - priv_addr_table[index++] = ltc_ltc0_lts0_v() + - ltc_num * ltc_stride + - lts_num * lts_stride + - (addr & (lts_stride - 1)); - } - - *priv_addr_table_index = index; -} - -void gr_gm20b_split_lts_broadcast_addr(struct gk20a *g, u32 addr, - u32 *priv_addr_table, - u32 *priv_addr_table_index) -{ - u32 num_ltc = g->ltc_count; - u32 i, start, ltc_num = 0; - u32 pltcg_base = ltc_pltcg_base_v(); - u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE); - - for (i = 0; i < num_ltc; i++) { - start = pltcg_base + i * ltc_stride; - if ((addr >= start) && (addr < (start + ltc_stride))) { - ltc_num = i; - break; - } - } - gr_gm20b_update_ltc_lts_addr(g, addr, ltc_num, priv_addr_table, - priv_addr_table_index); -} - -void gr_gm20b_split_ltc_broadcast_addr(struct gk20a *g, u32 addr, - u32 *priv_addr_table, - u32 *priv_addr_table_index) -{ - u32 num_ltc = g->ltc_count; - u32 ltc_num; - - for (ltc_num = 0; ltc_num < num_ltc; 
ltc_num++) { - gr_gm20b_update_ltc_lts_addr(g, addr, ltc_num, - priv_addr_table, priv_addr_table_index); - } -} - void gm20b_gr_clear_sm_hww(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, u32 global_esr) { diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.h b/drivers/gpu/nvgpu/gm20b/gr_gm20b.h index 5c82fd65..9d8e5cdf 100644 --- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.h +++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.h @@ -124,14 +124,6 @@ int gm20b_gr_clear_sm_error_state(struct gk20a *g, struct channel_gk20a *ch, u32 sm_id); int gr_gm20b_get_preemption_mode_flags(struct gk20a *g, struct nvgpu_preemption_modes_rec *preemption_modes_rec); -bool gr_gm20b_is_ltcs_ltss_addr(struct gk20a *g, u32 addr); -bool gr_gm20b_is_ltcn_ltss_addr(struct gk20a *g, u32 addr); -void gr_gm20b_split_lts_broadcast_addr(struct gk20a *g, u32 addr, - u32 *priv_addr_table, - u32 *priv_addr_table_index); -void gr_gm20b_split_ltc_broadcast_addr(struct gk20a *g, u32 addr, - u32 *priv_addr_table, - u32 *priv_addr_table_index); void gm20b_gr_clear_sm_hww(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, u32 global_esr); u32 gr_gm20b_get_pmm_per_chiplet_offset(void); diff --git a/drivers/gpu/nvgpu/gm20b/hal_gm20b.c b/drivers/gpu/nvgpu/gm20b/hal_gm20b.c index 05acc0cf..3b164f9c 100644 --- a/drivers/gpu/nvgpu/gm20b/hal_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/hal_gm20b.c @@ -31,6 +31,7 @@ #include "common/fb/fb_gm20b.h" #include "common/therm/therm_gm20b.h" #include "common/therm/therm_gm20b.h" +#include "common/ltc/ltc_gm20b.h" #include "gk20a/gk20a.h" #include "gk20a/ce2_gk20a.h" @@ -45,9 +46,7 @@ #include "gk20a/gr_gk20a.h" #include "gk20a/tsg_gk20a.h" -#include "ltc_gm20b.h" #include "gr_gm20b.h" -#include "ltc_gm20b.h" #include "fifo_gm20b.h" #include "gr_ctx_gm20b.h" #include "mm_gm20b.h" @@ -200,6 +199,11 @@ static const struct gpu_ops gm20b_ops = { .cbc_fix_config = gm20b_ltc_cbc_fix_config, .flush = gm20b_flush_ltc, .set_enabled = gm20b_ltc_set_enabled, + .pri_is_ltc_addr = gm20b_ltc_pri_is_ltc_addr, + .is_ltcs_ltss_addr = gm20b_ltc_is_ltcs_ltss_addr, + .is_ltcn_ltss_addr = gm20b_ltc_is_ltcn_ltss_addr, + .split_lts_broadcast_addr = gm20b_ltc_split_lts_broadcast_addr, + .split_ltc_broadcast_addr = gm20b_ltc_split_ltc_broadcast_addr, }, .ce2 = { .isr_stall = gk20a_ce2_isr, @@ -281,10 +285,6 @@ static const struct gpu_ops gm20b_ops = { .init_sm_id_table = gr_gk20a_init_sm_id_table, .load_smid_config = gr_gm20b_load_smid_config, .program_sm_id_numbering = gr_gm20b_program_sm_id_numbering, - .is_ltcs_ltss_addr = gr_gm20b_is_ltcs_ltss_addr, - .is_ltcn_ltss_addr = gr_gm20b_is_ltcn_ltss_addr, - .split_lts_broadcast_addr = gr_gm20b_split_lts_broadcast_addr, - .split_ltc_broadcast_addr = gr_gm20b_split_ltc_broadcast_addr, .setup_rop_mapping = gr_gk20a_setup_rop_mapping, .program_zcull_mapping = gr_gk20a_program_zcull_mapping, .commit_global_timeslice = gr_gk20a_commit_global_timeslice, diff --git a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c deleted file mode 100644 index 65945fad..00000000 --- a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c +++ /dev/null @@ -1,489 +0,0 @@ -/* - * GM20B L2 - * - * Copyright (c) 2014-2018 NVIDIA CORPORATION. All rights reserved. 
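The hal_gm20b.c hunk above shows the point of the whole patch: the LTC pri-address helpers move out of g->ops.gr and into g->ops.ltc, each chip's HAL file fills the slots, and common code such as gr_gk20a_decode_priv_addr() dispatches through the table with no chip-specific branches. A minimal stand-alone model of that ops-table shape (types and the address window are illustrative, not the nvgpu definitions):

/* Minimal model of the gpu_ops.ltc dispatch introduced above. */
#include <stdbool.h>
#include <stdio.h>

struct gk20a_model;

struct ltc_ops {
	bool (*pri_is_ltc_addr)(struct gk20a_model *g, unsigned int addr);
};

struct gk20a_model {
	struct ltc_ops ltc;
};

/* chip-specific implementation, in the spirit of gm20b */
static bool gm20b_pri_is_ltc_addr_model(struct gk20a_model *g,
					unsigned int addr)
{
	(void)g;
	/* placeholder pltcg window, not the real base/extent */
	return addr >= 0x140000u && addr < 0x180000u;
}

/* per-chip wiring, as in gm20b_ops.ltc in hal_gm20b.c above */
static const struct ltc_ops gm20b_ltc_ops_model = {
	.pri_is_ltc_addr = gm20b_pri_is_ltc_addr_model,
};

int main(void)
{
	struct gk20a_model g = { .ltc = gm20b_ltc_ops_model };

	/* common code dispatches without knowing the chip */
	printf("0x140400 is %san LTC address\n",
	       g.ltc.pri_is_ltc_addr(&g, 0x140400u) ? "" : "not ");
	return 0;
}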
diff --git a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
deleted file mode 100644
index 65945fad..00000000
--- a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
+++ /dev/null
@@ -1,489 +0,0 @@
-/*
- * GM20B L2
- *
- * Copyright (c) 2014-2018 NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include 
-
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-
-#include 
-#include 
-#include 
-#include 
-
-#include "gk20a/gk20a.h"
-
-#include "ltc_gm20b.h"
-
-int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
-{
-	/* max memory size (MB) to cover */
-	u32 max_size = gr->max_comptag_mem;
-	/* one tag line covers 128KB */
-	u32 max_comptag_lines = max_size << 3U;
-
-	u32 hw_max_comptag_lines =
-		ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_init_v();
-
-	u32 cbc_param =
-		gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r());
-	u32 comptags_per_cacheline =
-		ltc_ltcs_ltss_cbc_param_comptags_per_cache_line_v(cbc_param);
-
-	u32 compbit_backing_size;
-
-	int err;
-
-	nvgpu_log_fn(g, " ");
-
-	if (max_comptag_lines == 0U)
-		return 0;
-
-	if (max_comptag_lines > hw_max_comptag_lines)
-		max_comptag_lines = hw_max_comptag_lines;
-
-	compbit_backing_size =
-		DIV_ROUND_UP(max_comptag_lines, comptags_per_cacheline) *
-		gr->cacheline_size * gr->slices_per_ltc * g->ltc_count;
-
-	/* aligned to 2KB * ltc_count */
-	compbit_backing_size +=
-		g->ltc_count << ltc_ltcs_ltss_cbc_base_alignment_shift_v();
-
-	/* must be a multiple of 64KB */
-	compbit_backing_size = roundup(compbit_backing_size, 64*1024);
-
-	max_comptag_lines =
-		(compbit_backing_size * comptags_per_cacheline) /
-		(gr->cacheline_size * gr->slices_per_ltc * g->ltc_count);
-
-	if (max_comptag_lines > hw_max_comptag_lines)
-		max_comptag_lines = hw_max_comptag_lines;
-
-	nvgpu_log_info(g, "compbit backing store size : %d",
-		compbit_backing_size);
-	nvgpu_log_info(g, "max comptag lines : %d",
-		max_comptag_lines);
-
-	err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size);
-	if (err)
-		return err;
-
-	err = gk20a_comptag_allocator_init(g, &gr->comp_tags, max_comptag_lines);
-	if (err)
-		return err;
-
-	gr->max_comptag_lines = max_comptag_lines;
-	gr->comptags_per_cacheline = comptags_per_cacheline;
-
-	return 0;
-}
-
-int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
-		       u32 min, u32 max)
-{
-	struct gr_gk20a *gr = &g->gr;
-	struct nvgpu_timeout timeout;
-	int err = 0;
-	u32 ltc, slice, ctrl1, val, hw_op = 0U;
-	u32 slices_per_ltc = ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(
-				gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r()));
-	u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
-	u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
-	const u32 max_lines = 16384U;
-
-	nvgpu_log_fn(g, " ");
-
-	trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max);
-
-	if (gr->compbit_store.mem.size == 0)
-		return 0;
-
-	while (1) {
-		const u32 iter_max = min(min + max_lines - 1, max);
-		bool full_cache_op = true;
-
-		nvgpu_mutex_acquire(&g->mm.l2_op_lock);
-
-		nvgpu_log_info(g, "clearing CBC lines %u..%u", min, iter_max);
-
-		if (op == gk20a_cbc_op_clear) {
-			gk20a_writel(
-				g, ltc_ltcs_ltss_cbc_ctrl2_r(),
-				ltc_ltcs_ltss_cbc_ctrl2_clear_lower_bound_f(
-					min));
-			gk20a_writel(
-				g, ltc_ltcs_ltss_cbc_ctrl3_r(),
-				ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_f(
-					iter_max));
-			hw_op = ltc_ltcs_ltss_cbc_ctrl1_clear_active_f();
-			full_cache_op = false;
-		} else if (op == gk20a_cbc_op_clean) {
-			/* this is full-cache op */
-			hw_op = ltc_ltcs_ltss_cbc_ctrl1_clean_active_f();
-		} else if (op == gk20a_cbc_op_invalidate) {
-			/* this is full-cache op */
-			hw_op = ltc_ltcs_ltss_cbc_ctrl1_invalidate_active_f();
-		} else {
-			nvgpu_err(g, "Unknown op: %u", (unsigned)op);
-			err = -EINVAL;
-			goto out;
-		}
-		gk20a_writel(g, ltc_ltcs_ltss_cbc_ctrl1_r(),
-			     gk20a_readl(g,
-					 ltc_ltcs_ltss_cbc_ctrl1_r()) | hw_op);
-
-		for (ltc = 0; ltc < g->ltc_count; ltc++) {
-			for (slice = 0; slice < slices_per_ltc; slice++) {
-
-				ctrl1 = ltc_ltc0_lts0_cbc_ctrl1_r() +
-					ltc * ltc_stride + slice * lts_stride;
-
-				nvgpu_timeout_init(g, &timeout, 2000,
-						   NVGPU_TIMER_RETRY_TIMER);
-				do {
-					val = gk20a_readl(g, ctrl1);
-					if (!(val & hw_op))
-						break;
-					nvgpu_udelay(5);
-				} while (!nvgpu_timeout_expired(&timeout));
-
-				if (nvgpu_timeout_peek_expired(&timeout)) {
-					nvgpu_err(g, "comp tag clear timeout");
-					err = -EBUSY;
-					goto out;
-				}
-			}
-		}
-
-		/* are we done? */
-		if (full_cache_op || iter_max == max)
-			break;
-
-		/* note: iter_max is inclusive upper bound */
-		min = iter_max + 1;
-
-		/* give a chance for higher-priority threads to progress */
-		nvgpu_mutex_release(&g->mm.l2_op_lock);
-	}
-out:
-	trace_gk20a_ltc_cbc_ctrl_done(g->name);
-	nvgpu_mutex_release(&g->mm.l2_op_lock);
-	return err;
-}
-
-void gm20b_ltc_init_fs_state(struct gk20a *g)
-{
-	struct gr_gk20a *gr = &g->gr;
-	u32 reg;
-
-	nvgpu_log_info(g, "initialize gm20b l2");
-
-	g->max_ltc_count = gk20a_readl(g, top_num_ltcs_r());
-	g->ltc_count = gk20a_readl(g, pri_ringmaster_enum_ltc_r());
-	nvgpu_log_info(g, "%d ltcs out of %d", g->ltc_count, g->max_ltc_count);
-
-	reg = gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r());
-	gr->slices_per_ltc = ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(reg);;
-	gr->cacheline_size =
-		512U << ltc_ltcs_ltss_cbc_param_cache_line_size_v(reg);
-
-	gk20a_writel(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r(),
-	g->ltc_count);
-	gk20a_writel(g, ltc_ltcs_misc_ltc_num_active_ltcs_r(),
-	g->ltc_count);
-
-	gk20a_writel(g, ltc_ltcs_ltss_dstg_cfg0_r(),
-		     gk20a_readl(g, ltc_ltc0_lts0_dstg_cfg0_r()) |
-		     ltc_ltcs_ltss_dstg_cfg0_vdc_4to2_disable_m());
-
-	/* Disable LTC interrupts */
-	reg = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
-	reg &= ~ltc_ltcs_ltss_intr_en_evicted_cb_m();
-	reg &= ~ltc_ltcs_ltss_intr_en_illegal_compstat_access_m();
-	reg &= ~ltc_ltcs_ltss_intr_en_illegal_compstat_m();
-	gk20a_writel(g, ltc_ltcs_ltss_intr_r(), reg);
-}
-
-void gm20b_ltc_isr(struct gk20a *g)
-{
-	u32 mc_intr, ltc_intr;
-	unsigned int ltc, slice;
-	u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
-	u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
-
-	mc_intr = gk20a_readl(g, mc_intr_ltc_r());
-	nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr);
-	for (ltc = 0; ltc < g->ltc_count; ltc++) {
-		if ((mc_intr & 1U << ltc) == 0)
-			continue;
-		for (slice = 0; slice < g->gr.slices_per_ltc; slice++) {
-			ltc_intr = gk20a_readl(g, ltc_ltc0_lts0_intr_r() +
-					   ltc_stride * ltc +
-					   lts_stride * slice);
-			nvgpu_err(g, "ltc%d, slice %d: %08x",
-				  ltc, slice, ltc_intr);
-			gk20a_writel(g, ltc_ltc0_lts0_intr_r() +
-					   ltc_stride * ltc +
-					   lts_stride * slice,
-				     ltc_intr);
-		}
-	}
-}
-
-u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base)
-{
-	u32 val = gk20a_readl(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r());
-	if (val == 2U) {
-		return base * 2;
-	} else if (val != 1) {
-		nvgpu_err(g, "Invalid number of active ltcs: %08x", val);
-	}
-
-	return base;
-}
-
-/*
- * Performs a full flush of the L2 cache.
- */
-void gm20b_flush_ltc(struct gk20a *g)
-{
-	struct nvgpu_timeout timeout;
-	unsigned int ltc;
-	u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
-
-	/* Clean... */
-	nvgpu_writel_check(g, ltc_ltcs_ltss_tstg_cmgmt1_r(),
-		ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_f() |
-		ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_f() |
-		ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_f() |
-		ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_f() |
-		ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_f() |
-		ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_f());
-
-	/* Wait on each LTC individually. */
-	for (ltc = 0; ltc < g->ltc_count; ltc++) {
-		u32 op_pending;
-
-		/*
-		 * Use 5ms - this should be sufficient time to flush the cache.
-		 * On tegra, rough EMC BW available for old tegra chips (newer
-		 * chips are strictly faster) can be estimated as follows:
-		 *
-		 * Lowest reasonable EMC clock speed will be around 102MHz on
-		 * t124 for display enabled boards and generally fixed to max
-		 * for non-display boards (since they are generally plugged in).
-		 *
-		 * Thus, the available BW is 64b * 2 * 102MHz = 1.3GB/s. Of that
-		 * BW the GPU will likely get about half (display and overhead/
-		 * utilization inefficiency eating the rest) so 650MB/s at
-		 * worst. Assuming at most 1MB of GPU L2 cache (less for most
-		 * chips) worst case is we take 1MB/650MB/s = 1.5ms.
-		 *
-		 * So 5ms timeout here should be more than sufficient.
-		 */
-		nvgpu_timeout_init(g, &timeout, 5, NVGPU_TIMER_CPU_TIMER);
-
-		do {
-			int cmgmt1 = ltc_ltc0_ltss_tstg_cmgmt1_r() +
-				     ltc * ltc_stride;
-			op_pending = gk20a_readl(g, cmgmt1);
-		} while ((op_pending &
-			  ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_f()) &&
-			 !nvgpu_timeout_expired_msg(&timeout,
-						    "L2 flush timeout!"));
-	}
-
-	/* And invalidate. */
-	nvgpu_writel_check(g, ltc_ltcs_ltss_tstg_cmgmt0_r(),
-	     ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_f() |
-	     ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_f() |
-	     ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_f() |
-	     ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_f() |
-	     ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_f());
-
-	/* Wait on each LTC individually. */
-	for (ltc = 0; ltc < g->ltc_count; ltc++) {
-		u32 op_pending;
-
-		/* Again, 5ms. */
-		nvgpu_timeout_init(g, &timeout, 5, NVGPU_TIMER_CPU_TIMER);
-
-		do {
-			int cmgmt0 = ltc_ltc0_ltss_tstg_cmgmt0_r() +
-				     ltc * ltc_stride;
-			op_pending = gk20a_readl(g, cmgmt0);
-		} while ((op_pending &
-			  ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_f()) &&
-			 !nvgpu_timeout_expired_msg(&timeout,
-						    "L2 flush timeout!"));
-	}
-}
-
-int gm20b_determine_L2_size_bytes(struct gk20a *g)
-{
-	u32 lts_per_ltc;
-	u32 ways;
-	u32 sets;
-	u32 bytes_per_line;
-	u32 active_ltcs;
-	u32 cache_size;
-
-	u32 tmp;
-	u32 active_sets_value;
-
-	tmp = gk20a_readl(g, ltc_ltc0_lts0_tstg_cfg1_r());
-	ways = hweight32(ltc_ltc0_lts0_tstg_cfg1_active_ways_v(tmp));
-
-	active_sets_value = ltc_ltc0_lts0_tstg_cfg1_active_sets_v(tmp);
-	if (active_sets_value == ltc_ltc0_lts0_tstg_cfg1_active_sets_all_v()) {
-		sets = 64U;
-	} else if (active_sets_value ==
-			ltc_ltc0_lts0_tstg_cfg1_active_sets_half_v()) {
-		sets = 32U;
-	} else if (active_sets_value ==
-			ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v()) {
-		sets = 16U;
-	} else {
-		nvgpu_err(g, "Unknown constant %u for active sets",
-			  (unsigned)active_sets_value);
-		sets = 0U;
-	}
-
-	active_ltcs = g->gr.num_fbps;
-
-	/* chip-specific values */
-	lts_per_ltc = 2U;
-	bytes_per_line = 128U;
-	cache_size = active_ltcs * lts_per_ltc * ways * sets * bytes_per_line;
-
-	return cache_size;
-}
-
-/*
- * Sets the ZBC color for the passed index.
- */
-void gm20b_ltc_set_zbc_color_entry(struct gk20a *g,
-				   struct zbc_entry *color_val,
-				   u32 index)
-{
-	u32 i;
-	u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;
-
-	nvgpu_writel_check(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
-		     ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));
-
-	for (i = 0;
-	     i < ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(); i++) {
-		nvgpu_writel_check(g,
-			ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(i),
-			color_val->color_l2[i]);
-	}
-}
-
-/*
- * Sets the ZBC depth for the passed index.
- */
-void gm20b_ltc_set_zbc_depth_entry(struct gk20a *g,
-				   struct zbc_entry *depth_val,
-				   u32 index)
-{
-	u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;
-
-	nvgpu_writel_check(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
-		     ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));
-
-	nvgpu_writel_check(g,
-			ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(),
-			depth_val->depth);
-}
-
-void gm20b_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
-{
-	u32 max_size = gr->max_comptag_mem;
-	u32 max_comptag_lines = max_size << 3U;
-
-	u32 compbit_base_post_divide;
-	u64 compbit_base_post_multiply64;
-	u64 compbit_store_iova;
-	u64 compbit_base_post_divide64;
-
-	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
-		compbit_store_iova = nvgpu_mem_get_phys_addr(g,
-				&gr->compbit_store.mem);
-	else
-		compbit_store_iova = nvgpu_mem_get_addr(g,
-				&gr->compbit_store.mem);
-
-	compbit_base_post_divide64 = compbit_store_iova >>
-		ltc_ltcs_ltss_cbc_base_alignment_shift_v();
-
-	do_div(compbit_base_post_divide64, g->ltc_count);
-	compbit_base_post_divide = u64_lo32(compbit_base_post_divide64);
-
-	compbit_base_post_multiply64 = ((u64)compbit_base_post_divide *
-		g->ltc_count) << ltc_ltcs_ltss_cbc_base_alignment_shift_v();
-
-	if (compbit_base_post_multiply64 < compbit_store_iova)
-		compbit_base_post_divide++;
-
-	/* Bug 1477079 indicates sw adjustment on the posted divided base. */
-	if (g->ops.ltc.cbc_fix_config)
-		compbit_base_post_divide =
-			g->ops.ltc.cbc_fix_config(g, compbit_base_post_divide);
-
-	gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(),
-		compbit_base_post_divide);
-
-	nvgpu_log(g, gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte,
-		   "compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n",
-		   (u32)(compbit_store_iova >> 32),
-		   (u32)(compbit_store_iova & 0xffffffff),
-		   compbit_base_post_divide);
-
-	gr->compbit_store.base_hw = compbit_base_post_divide;
-
-	g->ops.ltc.cbc_ctrl(g, gk20a_cbc_op_invalidate,
-			    0, max_comptag_lines - 1);
-
-}
-
-void gm20b_ltc_set_enabled(struct gk20a *g, bool enabled)
-{
-	u32 reg_f = ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f();
-	u32 reg = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r());
-
-	if (enabled)
-		/* bypass disabled (normal caching ops)*/
-		reg &= ~reg_f;
-	else
-		/* bypass enabled (no caching) */
-		reg |= reg_f;
-
-	gk20a_writel(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r(), reg);
-}
diff --git a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.h b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.h
deleted file mode 100644
index 0f9145be..00000000
--- a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * GM20B L2
- *
- * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _NVHOST_GM20B_LTC
-#define _NVHOST_GM20B_LTC
-struct gpu_ops;
-
-int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr);
-int gm20b_determine_L2_size_bytes(struct gk20a *g);
-void gm20b_ltc_set_zbc_color_entry(struct gk20a *g,
-				   struct zbc_entry *color_val,
-				   u32 index);
-void gm20b_ltc_set_zbc_depth_entry(struct gk20a *g,
-				   struct zbc_entry *depth_val,
-				   u32 index);
-void gm20b_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr);
-void gm20b_ltc_set_enabled(struct gk20a *g, bool enabled);
-void gm20b_ltc_init_fs_state(struct gk20a *g);
-int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
-		       u32 min, u32 max);
-void gm20b_ltc_isr(struct gk20a *g);
-u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base);
-void gm20b_flush_ltc(struct gk20a *g);
-int gm20b_ltc_alloc_phys_cbc(struct gk20a *g,
-			     size_t compbit_backing_size);
-int gm20b_ltc_alloc_virt_cbc(struct gk20a *g,
-			     size_t compbit_backing_size);
-#endif
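To make the sizing arithmetic in gm20b_ltc_init_comptags() above concrete, here is a small stand-alone walk-through. All inputs are assumptions chosen for the example; in the driver they come from CBC_PARAM and the board configuration.

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static uint32_t roundup_to(uint32_t v, uint32_t m)
{
	return DIV_ROUND_UP(v, m) * m;
}

int main(void)
{
	uint32_t max_size = 4096;       /* MB of memory to cover (assumed)   */
	uint32_t lines = max_size << 3; /* one comptag line per 128 KB       */
	uint32_t comptags_per_cl = 4;   /* assumed CBC_PARAM field           */
	uint32_t cacheline = 2048;      /* assumed: 512 << cache_line_size   */
	uint32_t slices = 2, ltcs = 2;  /* assumed topology                  */

	/* Backing store: cachelines needed, scaled by slices and LTCs. */
	uint32_t backing = DIV_ROUND_UP(lines, comptags_per_cl)
			 * cacheline * slices * ltcs;
	backing += ltcs << 11;                      /* 2 KB per LTC align */
	backing  = roundup_to(backing, 64 * 1024);  /* multiple of 64 KB  */

	printf("comptag lines=%u, backing store=%u bytes\n", lines, backing);
	return 0;
}

With these numbers the store comes out to 64 MiB plus one 64 KiB rounding step, which matches the shape of the driver's "size, align, round, then recompute lines" sequence.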
diff --git a/drivers/gpu/nvgpu/gp106/hal_gp106.c b/drivers/gpu/nvgpu/gp106/hal_gp106.c
index 02a2f0a6..1c5e1800 100644
--- a/drivers/gpu/nvgpu/gp106/hal_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/hal_gp106.c
@@ -36,6 +36,8 @@
 #include "common/xve/xve_gp106.h"
 #include "common/therm/therm_gm20b.h"
 #include "common/therm/therm_gp106.h"
+#include "common/ltc/ltc_gm20b.h"
+#include "common/ltc/ltc_gp10b.h"
 
 #include "gk20a/gk20a.h"
 #include "gk20a/fifo_gk20a.h"
@@ -49,7 +51,6 @@
 #include "gk20a/pmu_gk20a.h"
 #include "gk20a/gr_gk20a.h"
 
-#include "gp10b/ltc_gp10b.h"
 #include "gp10b/gr_gp10b.h"
 #include "gp10b/fecs_trace_gp10b.h"
 #include "gp10b/mc_gp10b.h"
@@ -64,7 +65,6 @@
 #include "gp106/fifo_gp106.h"
 #include "gp106/regops_gp106.h"
 
-#include "gm20b/ltc_gm20b.h"
 #include "gm20b/gr_gm20b.h"
 #include "gm20b/fifo_gm20b.h"
 #include "gm20b/mm_gm20b.h"
@@ -256,6 +256,11 @@ static const struct gpu_ops gp106_ops = {
 		.cbc_fix_config = NULL,
 		.flush = gm20b_flush_ltc,
 		.set_enabled = gp10b_ltc_set_enabled,
+		.pri_is_ltc_addr = gm20b_ltc_pri_is_ltc_addr,
+		.is_ltcs_ltss_addr = gm20b_ltc_is_ltcs_ltss_addr,
+		.is_ltcn_ltss_addr = gm20b_ltc_is_ltcn_ltss_addr,
+		.split_lts_broadcast_addr = gm20b_ltc_split_lts_broadcast_addr,
+		.split_ltc_broadcast_addr = gm20b_ltc_split_ltc_broadcast_addr,
 	},
 	.ce2 = {
 		.isr_stall = gp10b_ce_isr,
@@ -340,10 +345,6 @@ static const struct gpu_ops gp106_ops = {
 		.init_sm_id_table = gr_gk20a_init_sm_id_table,
 		.load_smid_config = gr_gp10b_load_smid_config,
 		.program_sm_id_numbering = gr_gm20b_program_sm_id_numbering,
-		.is_ltcs_ltss_addr = gr_gm20b_is_ltcs_ltss_addr,
-		.is_ltcn_ltss_addr = gr_gm20b_is_ltcn_ltss_addr,
-		.split_lts_broadcast_addr = gr_gm20b_split_lts_broadcast_addr,
-		.split_ltc_broadcast_addr = gr_gm20b_split_ltc_broadcast_addr,
 		.setup_rop_mapping = gr_gk20a_setup_rop_mapping,
 		.program_zcull_mapping = gr_gk20a_program_zcull_mapping,
 		.commit_global_timeslice = gr_gk20a_commit_global_timeslice,
diff --git a/drivers/gpu/nvgpu/gp10b/hal_gp10b.c b/drivers/gpu/nvgpu/gp10b/hal_gp10b.c
index cfbdc6ce..b9d8c81a 100644
--- a/drivers/gpu/nvgpu/gp10b/hal_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/hal_gp10b.c
@@ -34,6 +34,8 @@
 #include "common/fb/fb_gp10b.h"
 #include "common/therm/therm_gm20b.h"
 #include "common/therm/therm_gp10b.h"
+#include "common/ltc/ltc_gm20b.h"
+#include "common/ltc/ltc_gp10b.h"
 
 #include "gk20a/gk20a.h"
 #include "gk20a/fifo_gk20a.h"
@@ -51,7 +53,6 @@
 #include "gp10b/gr_gp10b.h"
 #include "gp10b/fecs_trace_gp10b.h"
 #include "gp10b/mc_gp10b.h"
-#include "gp10b/ltc_gp10b.h"
 #include "gp10b/mm_gp10b.h"
 #include "gp10b/ce_gp10b.h"
 #include "gp10b/pmu_gp10b.h"
@@ -60,7 +61,6 @@
 #include "gp10b/regops_gp10b.h"
 #include "gp10b/ecc_gp10b.h"
 
-#include "gm20b/ltc_gm20b.h"
 #include "gm20b/gr_gm20b.h"
 #include "gm20b/fifo_gm20b.h"
 #include "gm20b/acr_gm20b.h"
@@ -214,6 +214,11 @@ static const struct gpu_ops gp10b_ops = {
 		.cbc_fix_config = gm20b_ltc_cbc_fix_config,
 		.flush = gm20b_flush_ltc,
 		.set_enabled = gp10b_ltc_set_enabled,
+		.pri_is_ltc_addr = gm20b_ltc_pri_is_ltc_addr,
+		.is_ltcs_ltss_addr = gm20b_ltc_is_ltcs_ltss_addr,
+		.is_ltcn_ltss_addr = gm20b_ltc_is_ltcn_ltss_addr,
+		.split_lts_broadcast_addr = gm20b_ltc_split_lts_broadcast_addr,
+		.split_ltc_broadcast_addr = gm20b_ltc_split_ltc_broadcast_addr,
 	},
 	.ce2 = {
 		.isr_stall = gp10b_ce_isr,
@@ -299,10 +304,6 @@ static const struct gpu_ops gp10b_ops = {
 		.init_sm_id_table = gr_gk20a_init_sm_id_table,
 		.load_smid_config = gr_gp10b_load_smid_config,
 		.program_sm_id_numbering = gr_gm20b_program_sm_id_numbering,
-		.is_ltcs_ltss_addr = gr_gm20b_is_ltcs_ltss_addr,
-		.is_ltcn_ltss_addr = gr_gm20b_is_ltcn_ltss_addr,
-		.split_lts_broadcast_addr = gr_gm20b_split_lts_broadcast_addr,
-		.split_ltc_broadcast_addr = gr_gm20b_split_ltc_broadcast_addr,
 		.setup_rop_mapping = gr_gk20a_setup_rop_mapping,
 		.program_zcull_mapping = gr_gk20a_program_zcull_mapping,
 		.commit_global_timeslice = gr_gk20a_commit_global_timeslice,
diff --git a/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c b/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c
deleted file mode 100644
index 79ebe86d..00000000
--- a/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c
+++ /dev/null
@@ -1,320 +0,0 @@
-/*
- * GP10B L2
- *
- * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include 
-
-#include 
-#include 
-#include 
-#include 
-
-#include 
-#include 
-
-#include "gk20a/gk20a.h"
-#include "gm20b/ltc_gm20b.h"
-
-#include "ltc_gp10b.h"
-
-int gp10b_determine_L2_size_bytes(struct gk20a *g)
-{
-	u32 tmp;
-	int ret;
-
-	nvgpu_log_fn(g, " ");
-
-	tmp = gk20a_readl(g, ltc_ltc0_lts0_tstg_info_1_r());
-
-	ret = g->ltc_count *
-		ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(tmp)*1024 *
-		ltc_ltc0_lts0_tstg_info_1_slices_per_l2_v(tmp);
-
-	nvgpu_log(g, gpu_dbg_info, "L2 size: %d\n", ret);
-
-	nvgpu_log_fn(g, "done");
-
-	return ret;
-}
-
-int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
-{
-	/* max memory size (MB) to cover */
-	u32 max_size = gr->max_comptag_mem;
-	/* one tag line covers 64KB */
-	u32 max_comptag_lines = max_size << 4U;
-
-	u32 hw_max_comptag_lines =
-		ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_init_v();
-
-	u32 cbc_param =
-		gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r());
-	u32 comptags_per_cacheline =
-		ltc_ltcs_ltss_cbc_param_comptags_per_cache_line_v(cbc_param);
-	u32 cbc_param2 =
-		gk20a_readl(g, ltc_ltcs_ltss_cbc_param2_r());
-	u32 gobs_per_comptagline_per_slice =
-		ltc_ltcs_ltss_cbc_param2_gobs_per_comptagline_per_slice_v(cbc_param2);
-
-	u32 compbit_backing_size;
-
-	int err;
-
-	nvgpu_log_fn(g, " ");
-
-	if (max_comptag_lines == 0U)
-		return 0;
-
-	/* Already initialized */
-	if (gr->max_comptag_lines)
-		return 0;
-
-	if (max_comptag_lines > hw_max_comptag_lines)
-		max_comptag_lines = hw_max_comptag_lines;
-
-	compbit_backing_size =
-		roundup(max_comptag_lines * gobs_per_comptagline_per_slice,
-			gr->cacheline_size);
-	compbit_backing_size = roundup(
-		compbit_backing_size * gr->slices_per_ltc * g->ltc_count,
-		g->ops.fb.compressible_page_size(g));
-
-	/* aligned to 2KB * ltc_count */
-	compbit_backing_size +=
-		g->ltc_count << ltc_ltcs_ltss_cbc_base_alignment_shift_v();
-
-	/* must be a multiple of 64KB */
-	compbit_backing_size = roundup(compbit_backing_size, 64*1024);
-
-	nvgpu_log_info(g, "compbit backing store size : %d",
-		compbit_backing_size);
-	nvgpu_log_info(g, "max comptag lines : %d",
-		max_comptag_lines);
-	nvgpu_log_info(g, "gobs_per_comptagline_per_slice: %d",
-		gobs_per_comptagline_per_slice);
-
-	err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size);
-	if (err)
-		return err;
-
-	err = gk20a_comptag_allocator_init(g, &gr->comp_tags, max_comptag_lines);
-	if (err)
-		return err;
-
-	gr->max_comptag_lines = max_comptag_lines;
-	gr->comptags_per_cacheline = comptags_per_cacheline;
-	gr->gobs_per_comptagline_per_slice = gobs_per_comptagline_per_slice;
-
-	return 0;
-}
-
-int gp10b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
-		       u32 min, u32 max)
-{
-	struct gr_gk20a *gr = &g->gr;
-	struct nvgpu_timeout timeout;
-	int err = 0;
-	u32 ltc, slice, ctrl1, val, hw_op = 0U;
-	u32 slices_per_ltc = ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(
-				gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r()));
-	u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
-	u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
-	const u32 max_lines = 16384U;
-
-	nvgpu_log_fn(g, " ");
-
-	trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max);
-
-	if (gr->compbit_store.mem.size == 0U)
-		return 0;
-
-	while (1) {
-		const u32 iter_max = min(min + max_lines - 1, max);
-		bool full_cache_op = true;
-
-		nvgpu_mutex_acquire(&g->mm.l2_op_lock);
-
-		nvgpu_log_info(g, "clearing CBC lines %u..%u", min, iter_max);
-
-		if (op == gk20a_cbc_op_clear) {
-			nvgpu_writel_check(
-				g, ltc_ltcs_ltss_cbc_ctrl2_r(),
-				ltc_ltcs_ltss_cbc_ctrl2_clear_lower_bound_f(
-					min));
-
-			nvgpu_writel_check(
-				g, ltc_ltcs_ltss_cbc_ctrl3_r(),
-				ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_f(
-					iter_max));
-
-			hw_op = ltc_ltcs_ltss_cbc_ctrl1_clear_active_f();
-			full_cache_op = false;
-		} else if (op == gk20a_cbc_op_clean) {
-			/* this is full-cache op */
-			hw_op = ltc_ltcs_ltss_cbc_ctrl1_clean_active_f();
-		} else if (op == gk20a_cbc_op_invalidate) {
-			/* this is full-cache op */
-			hw_op = ltc_ltcs_ltss_cbc_ctrl1_invalidate_active_f();
-		} else {
-			nvgpu_err(g, "Unknown op: %u", (unsigned)op);
-			err = -EINVAL;
-			goto out;
-		}
-		gk20a_writel(g, ltc_ltcs_ltss_cbc_ctrl1_r(),
-			     gk20a_readl(g,
-					 ltc_ltcs_ltss_cbc_ctrl1_r()) | hw_op);
-
-		for (ltc = 0; ltc < g->ltc_count; ltc++) {
-			for (slice = 0; slice < slices_per_ltc; slice++) {
-
-				ctrl1 = ltc_ltc0_lts0_cbc_ctrl1_r() +
-					ltc * ltc_stride + slice * lts_stride;
-
-				nvgpu_timeout_init(g, &timeout, 2000,
-						   NVGPU_TIMER_RETRY_TIMER);
-				do {
-					val = gk20a_readl(g, ctrl1);
-					if (!(val & hw_op))
-						break;
-					nvgpu_udelay(5);
-				} while (!nvgpu_timeout_expired(&timeout));
-
-				if (nvgpu_timeout_peek_expired(&timeout)) {
-					nvgpu_err(g, "comp tag clear timeout");
-					err = -EBUSY;
-					goto out;
-				}
-			}
-		}
-
-		/* are we done? */
-		if (full_cache_op || iter_max == max)
-			break;
-
-		/* note: iter_max is inclusive upper bound */
-		min = iter_max + 1;
-
-		/* give a chance for higher-priority threads to progress */
-		nvgpu_mutex_release(&g->mm.l2_op_lock);
-	}
-out:
-	trace_gk20a_ltc_cbc_ctrl_done(g->name);
-	nvgpu_mutex_release(&g->mm.l2_op_lock);
-	return err;
-}
-
-void gp10b_ltc_isr(struct gk20a *g)
-{
-	u32 mc_intr, ltc_intr;
-	unsigned int ltc, slice;
-	u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
-	u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
-
-	mc_intr = gk20a_readl(g, mc_intr_ltc_r());
-	nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr);
-	for (ltc = 0; ltc < g->ltc_count; ltc++) {
-		if ((mc_intr & 1U << ltc) == 0)
-			continue;
-		for (slice = 0; slice < g->gr.slices_per_ltc; slice++) {
-			u32 offset = ltc_stride * ltc + lts_stride * slice;
-			ltc_intr = gk20a_readl(g, ltc_ltc0_lts0_intr_r() + offset);
-
-			/* Detect and handle ECC errors */
-			if (ltc_intr &
-				ltc_ltcs_ltss_intr_ecc_sec_error_pending_f()) {
-				u32 ecc_stats_reg_val;
-
-				nvgpu_err(g,
-					"Single bit error detected in GPU L2!");
-
-				ecc_stats_reg_val =
-					gk20a_readl(g,
-						ltc_ltc0_lts0_dstg_ecc_report_r() + offset);
-				g->ecc.ltc.ecc_sec_count[ltc][slice].counter +=
-					ltc_ltc0_lts0_dstg_ecc_report_sec_count_v(ecc_stats_reg_val);
-				ecc_stats_reg_val &=
-					~(ltc_ltc0_lts0_dstg_ecc_report_sec_count_m());
-				nvgpu_writel_check(g,
-					ltc_ltc0_lts0_dstg_ecc_report_r() + offset,
-					ecc_stats_reg_val);
-				g->ops.mm.l2_flush(g, true);
-			}
-			if (ltc_intr &
-				ltc_ltcs_ltss_intr_ecc_ded_error_pending_f()) {
-				u32 ecc_stats_reg_val;
-
-				nvgpu_err(g,
-					"Double bit error detected in GPU L2!");
-
-				ecc_stats_reg_val =
-					gk20a_readl(g,
-						ltc_ltc0_lts0_dstg_ecc_report_r() + offset);
-				g->ecc.ltc.ecc_ded_count[ltc][slice].counter +=
-					ltc_ltc0_lts0_dstg_ecc_report_ded_count_v(ecc_stats_reg_val);
-				ecc_stats_reg_val &=
-					~(ltc_ltc0_lts0_dstg_ecc_report_ded_count_m());
-				nvgpu_writel_check(g,
-					ltc_ltc0_lts0_dstg_ecc_report_r() + offset,
-					ecc_stats_reg_val);
-			}
-
-			nvgpu_err(g, "ltc%d, slice %d: %08x",
-				  ltc, slice, ltc_intr);
-			nvgpu_writel_check(g, ltc_ltc0_lts0_intr_r() +
-				   ltc_stride * ltc + lts_stride * slice,
-				   ltc_intr);
-		}
-	}
-}
-
-void gp10b_ltc_init_fs_state(struct gk20a *g)
-{
-	u32 ltc_intr;
-
-	gm20b_ltc_init_fs_state(g);
-
-	gk20a_writel(g, ltc_ltca_g_axi_pctrl_r(),
-		ltc_ltca_g_axi_pctrl_user_sid_f(g->ltc_streamid));
-
-	/* Enable ECC interrupts */
-	ltc_intr = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
-	ltc_intr |= ltc_ltcs_ltss_intr_en_ecc_sec_error_enabled_f() |
-			ltc_ltcs_ltss_intr_en_ecc_ded_error_enabled_f();
-	gk20a_writel(g, ltc_ltcs_ltss_intr_r(),
-		     ltc_intr);
-}
-
-void gp10b_ltc_set_enabled(struct gk20a *g, bool enabled)
-{
-	u32 reg_f = ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f();
-	u32 reg = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r());
-
-	if (enabled)
-		/* bypass disabled (normal caching ops)*/
-		reg &= ~reg_f;
-	else
-		/* bypass enabled (no caching) */
-		reg |= reg_f;
-
-	nvgpu_writel_check(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r(), reg);
-}
diff --git a/drivers/gpu/nvgpu/gp10b/ltc_gp10b.h b/drivers/gpu/nvgpu/gp10b/ltc_gp10b.h
deleted file mode 100644
index c1a2bf64..00000000
--- a/drivers/gpu/nvgpu/gp10b/ltc_gp10b.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef LTC_GP10B_H
-#define LTC_GP10B_H
-struct gpu_ops;
-
-void gp10b_ltc_isr(struct gk20a *g);
-
-int gp10b_determine_L2_size_bytes(struct gk20a *g);
-int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr);
-void gp10b_ltc_init_fs_state(struct gk20a *g);
-int gp10b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
-		       u32 min, u32 max);
-void gp10b_ltc_set_enabled(struct gk20a *g, bool enabled);
-#endif
diff --git a/drivers/gpu/nvgpu/gv100/hal_gv100.c b/drivers/gpu/nvgpu/gv100/hal_gv100.c
index 7926c35c..6904313b 100644
--- a/drivers/gpu/nvgpu/gv100/hal_gv100.c
+++ b/drivers/gpu/nvgpu/gv100/hal_gv100.c
@@ -39,6 +39,9 @@
 #include "common/therm/therm_gp106.h"
 #include "common/therm/therm_gp10b.h"
 #include "common/therm/therm_gv11b.h"
+#include "common/ltc/ltc_gm20b.h"
+#include "common/ltc/ltc_gp10b.h"
+#include "common/ltc/ltc_gv11b.h"
 
 #include "gk20a/gk20a.h"
 #include "gk20a/fifo_gk20a.h"
@@ -52,7 +55,6 @@
 #include "gk20a/pmu_gk20a.h"
 #include "gk20a/gr_gk20a.h"
 
-#include "gm20b/ltc_gm20b.h"
 #include "gm20b/gr_gm20b.h"
 #include "gm20b/fifo_gm20b.h"
 #include "gm20b/mm_gm20b.h"
@@ -69,7 +71,6 @@
 #include "gp106/flcn_gp106.h"
 
 #include "gp10b/gr_gp10b.h"
-#include "gp10b/ltc_gp10b.h"
 #include "gp10b/mc_gp10b.h"
 #include "gp10b/ce_gp10b.h"
 #include "gp10b/fifo_gp10b.h"
@@ -83,7 +84,6 @@
 #include "gv11b/hal_gv11b.h"
 #include "gv11b/gr_gv11b.h"
 #include "gv11b/mc_gv11b.h"
-#include "gv11b/ltc_gv11b.h"
 #include "gv11b/gv11b.h"
 #include "gv11b/ce_gv11b.h"
 #include "gv11b/mm_gv11b.h"
@@ -288,6 +288,11 @@ static const struct gpu_ops gv100_ops = {
 		.flush = gm20b_flush_ltc,
 		.set_enabled = gp10b_ltc_set_enabled,
 		.intr_en_illegal_compstat = gv11b_ltc_intr_en_illegal_compstat,
+		.pri_is_ltc_addr = gm20b_ltc_pri_is_ltc_addr,
+		.is_ltcs_ltss_addr = gm20b_ltc_is_ltcs_ltss_addr,
+		.is_ltcn_ltss_addr = gm20b_ltc_is_ltcn_ltss_addr,
+		.split_lts_broadcast_addr = gm20b_ltc_split_lts_broadcast_addr,
+		.split_ltc_broadcast_addr = gm20b_ltc_split_ltc_broadcast_addr,
 	},
 	.ce2 = {
 		.isr_stall = gv11b_ce_isr,
@@ -377,10 +382,6 @@ static const struct gpu_ops gv100_ops = {
 		.init_sm_id_table = gr_gv100_init_sm_id_table,
 		.load_smid_config = gr_gv11b_load_smid_config,
 		.program_sm_id_numbering = gr_gv11b_program_sm_id_numbering,
-		.is_ltcs_ltss_addr = gr_gm20b_is_ltcs_ltss_addr,
-		.is_ltcn_ltss_addr = gr_gm20b_is_ltcn_ltss_addr,
-		.split_lts_broadcast_addr = gr_gm20b_split_lts_broadcast_addr,
-		.split_ltc_broadcast_addr = gr_gm20b_split_ltc_broadcast_addr,
 		.setup_rop_mapping = gr_gv11b_setup_rop_mapping,
 		.program_zcull_mapping = gr_gv11b_program_zcull_mapping,
 		.commit_global_timeslice = gr_gv11b_commit_global_timeslice,
diff --git a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
index c2cf909a..41d2f695 100644
--- a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
@@ -4716,11 +4716,11 @@ int gr_gv11b_decode_priv_addr(struct gk20a *g, u32 addr,
 		}
 		*be_num = pri_get_be_num(g, addr);
 		return 0;
-	} else if (pri_is_ltc_addr(addr)) {
+	} else if (g->ops.ltc.pri_is_ltc_addr(g, addr)) {
 		*addr_type = CTXSW_ADDR_TYPE_LTCS;
-		if (g->ops.gr.is_ltcs_ltss_addr(g, addr))
+		if (g->ops.ltc.is_ltcs_ltss_addr(g, addr))
 			*broadcast_flags |= PRI_BROADCAST_FLAGS_LTCS;
-		else if (g->ops.gr.is_ltcn_ltss_addr(g, addr))
+		else if (g->ops.ltc.is_ltcn_ltss_addr(g, addr))
 			*broadcast_flags |= PRI_BROADCAST_FLAGS_LTSS;
 		return 0;
 	} else if (pri_is_fbpa_addr(g, addr)) {
@@ -4928,10 +4928,10 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g,
 			g->ops.gr.egpc_etpc_priv_addr_table(g, addr, gpc_num,
 				tpc_num, broadcast_flags, priv_addr_table, &t);
 	} else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTSS) {
-		g->ops.gr.split_lts_broadcast_addr(g, addr,
+		g->ops.ltc.split_lts_broadcast_addr(g, addr,
 			priv_addr_table, &t);
 	} else if (broadcast_flags & PRI_BROADCAST_FLAGS_LTCS) {
-		g->ops.gr.split_ltc_broadcast_addr(g, addr,
+		g->ops.ltc.split_ltc_broadcast_addr(g, addr,
 			priv_addr_table, &t);
 	} else if (broadcast_flags & PRI_BROADCAST_FLAGS_FBPA) {
 		g->ops.gr.split_fbpa_broadcast_addr(g, addr,
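The gr_gv11b.c hunks above change only the dispatch path; the decode logic itself is untouched. A self-contained sketch of that flow follows, with simplified types, placeholder flag values, and toy predicates standing in for the real gm20b helpers.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int u32;

#define PRI_BROADCAST_FLAGS_LTCS (1u << 0)  /* placeholder flag values */
#define PRI_BROADCAST_FLAGS_LTSS (1u << 1)

struct gk20a;
struct ltc_ops {
	bool (*pri_is_ltc_addr)(struct gk20a *g, u32 addr);
	bool (*is_ltcs_ltss_addr)(struct gk20a *g, u32 addr);
	bool (*is_ltcn_ltss_addr)(struct gk20a *g, u32 addr);
};
struct gpu_ops { struct ltc_ops ltc; };
struct gk20a { struct gpu_ops ops; };

/* Mirrors the shape of the gr_gv11b_decode_priv_addr() hunk: every LTC
 * question now goes through gpu_ops.ltc instead of gpu_ops.gr. */
static int decode_ltc_addr(struct gk20a *g, u32 addr, u32 *broadcast_flags)
{
	if (!g->ops.ltc.pri_is_ltc_addr(g, addr))
		return -1;                     /* not an LTC pri address */

	if (g->ops.ltc.is_ltcs_ltss_addr(g, addr))
		*broadcast_flags |= PRI_BROADCAST_FLAGS_LTCS;  /* all LTCs, all slices */
	else if (g->ops.ltc.is_ltcn_ltss_addr(g, addr))
		*broadcast_flags |= PRI_BROADCAST_FLAGS_LTSS;  /* one LTC, all slices */

	return 0;
}

/* Toy predicates with invented ranges; the driver derives the real
 * bounds from hw headers and litter values. */
static bool toy_is_ltc(struct gk20a *g, u32 a) { (void)g; return a >= 0x140000 && a < 0x180000; }
static bool toy_is_ltcs(struct gk20a *g, u32 a) { (void)g; return a >= 0x17e200; }
static bool toy_is_ltcn(struct gk20a *g, u32 a) { (void)g; return (a & 0xfff) >= 0xe00; }

int main(void)
{
	struct gk20a g = { .ops.ltc = { toy_is_ltc, toy_is_ltcs, toy_is_ltcn } };
	u32 flags = 0;

	printf("ret=%d flags=%u\n", decode_ltc_addr(&g, 0x17e204, &flags), flags);
	return 0;
}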
diff --git a/drivers/gpu/nvgpu/gv11b/hal_gv11b.c b/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
index 111a1ea2..3772649e 100644
--- a/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
@@ -36,6 +36,9 @@
 #include "common/therm/therm_gm20b.h"
 #include "common/therm/therm_gp10b.h"
 #include "common/therm/therm_gv11b.h"
+#include "common/ltc/ltc_gm20b.h"
+#include "common/ltc/ltc_gp10b.h"
+#include "common/ltc/ltc_gv11b.h"
 
 #include "gk20a/gk20a.h"
 #include "gk20a/fifo_gk20a.h"
@@ -49,14 +52,12 @@
 #include "gk20a/pmu_gk20a.h"
 #include "gk20a/gr_gk20a.h"
 
-#include "gm20b/ltc_gm20b.h"
 #include "gm20b/gr_gm20b.h"
 #include "gm20b/fifo_gm20b.h"
 #include "gm20b/mm_gm20b.h"
 #include "gm20b/acr_gm20b.h"
 #include "gm20b/pmu_gm20b.h"
 
-#include "gp10b/ltc_gp10b.h"
 #include "gp10b/mc_gp10b.h"
 #include "gp10b/ce_gp10b.h"
 #include "gp10b/fifo_gp10b.h"
@@ -76,7 +77,6 @@
 #include "css_gr_gv11b.h"
 #include "gr_gv11b.h"
 #include "mc_gv11b.h"
-#include "ltc_gv11b.h"
 #include "gv11b.h"
 #include "ce_gv11b.h"
 #include "gr_ctx_gv11b.h"
@@ -245,6 +245,11 @@ static const struct gpu_ops gv11b_ops = {
 		.flush = gm20b_flush_ltc,
 		.set_enabled = gp10b_ltc_set_enabled,
 		.intr_en_illegal_compstat = gv11b_ltc_intr_en_illegal_compstat,
+		.pri_is_ltc_addr = gm20b_ltc_pri_is_ltc_addr,
+		.is_ltcs_ltss_addr = gm20b_ltc_is_ltcs_ltss_addr,
+		.is_ltcn_ltss_addr = gm20b_ltc_is_ltcn_ltss_addr,
+		.split_lts_broadcast_addr = gm20b_ltc_split_lts_broadcast_addr,
+		.split_ltc_broadcast_addr = gm20b_ltc_split_ltc_broadcast_addr,
 	},
 	.ce2 = {
 		.isr_stall = gv11b_ce_isr,
@@ -332,10 +337,6 @@ static const struct gpu_ops gv11b_ops = {
 		.init_sm_id_table = gr_gv100_init_sm_id_table,
 		.load_smid_config = gr_gv11b_load_smid_config,
 		.program_sm_id_numbering = gr_gv11b_program_sm_id_numbering,
-		.is_ltcs_ltss_addr = gr_gm20b_is_ltcs_ltss_addr,
-		.is_ltcn_ltss_addr = gr_gm20b_is_ltcn_ltss_addr,
-		.split_lts_broadcast_addr = gr_gm20b_split_lts_broadcast_addr,
-		.split_ltc_broadcast_addr = gr_gm20b_split_ltc_broadcast_addr,
 		.setup_rop_mapping = gr_gv11b_setup_rop_mapping,
 		.program_zcull_mapping = gr_gv11b_program_zcull_mapping,
 		.commit_global_timeslice = gr_gv11b_commit_global_timeslice,
diff --git a/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c b/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c
deleted file mode 100644
index d7c385a9..00000000
--- a/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * GV11B LTC
- *
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include 
-#include "gk20a/gk20a.h"
-#include "gp10b/ltc_gp10b.h"
-
-#include "ltc_gv11b.h"
-
-#include 
-#include 
-#include 
-#include 
-
-#include 
-
-/*
- * Sets the ZBC stencil for the passed index.
- */
-void gv11b_ltc_set_zbc_stencil_entry(struct gk20a *g,
-					 struct zbc_entry *stencil_val,
-					 u32 index)
-{
-	u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;
-
-	nvgpu_writel_check(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
-		     ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));
-
-	nvgpu_writel_check(g,
-			ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_r(),
-			stencil_val->depth);
-}
-
-void gv11b_ltc_init_fs_state(struct gk20a *g)
-{
-	struct gr_gk20a *gr = &g->gr;
-	u32 ltc_intr;
-	u32 reg;
-
-	nvgpu_log_info(g, "initialize gv11b l2");
-
-	g->max_ltc_count = gk20a_readl(g, top_num_ltcs_r());
-	g->ltc_count = g->ops.priv_ring.enum_ltc(g);
-	nvgpu_log_info(g, "%u ltcs out of %u", g->ltc_count, g->max_ltc_count);
-
-	reg = gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r());
-	gr->slices_per_ltc = ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(reg);;
-	gr->cacheline_size =
-		512U << ltc_ltcs_ltss_cbc_param_cache_line_size_v(reg);
-
-	/* Disable LTC interrupts */
-	reg = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
-	reg &= ~ltc_ltcs_ltss_intr_en_evicted_cb_m();
-	reg &= ~ltc_ltcs_ltss_intr_en_illegal_compstat_access_m();
-	nvgpu_writel_check(g, ltc_ltcs_ltss_intr_r(), reg);
-
-	if (g->ops.ltc.intr_en_illegal_compstat)
-		g->ops.ltc.intr_en_illegal_compstat(g,
-				g->ltc_intr_en_illegal_compstat);
-
-	/* Enable ECC interrupts */
-	ltc_intr = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
-	ltc_intr |= ltc_ltcs_ltss_intr_en_ecc_sec_error_enabled_f() |
-			ltc_ltcs_ltss_intr_en_ecc_ded_error_enabled_f();
-	nvgpu_writel_check(g, ltc_ltcs_ltss_intr_r(),
-			ltc_intr);
-}
-
-void gv11b_ltc_intr_en_illegal_compstat(struct gk20a *g, bool enable)
-{
-	u32 val;
-
-	/* disble/enble illegal_compstat interrupt */
-	val = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
-	if (enable)
-		val = set_field(val,
-			ltc_ltcs_ltss_intr_en_illegal_compstat_m(),
-			ltc_ltcs_ltss_intr_en_illegal_compstat_enabled_f());
-	else
-		val = set_field(val,
-			ltc_ltcs_ltss_intr_en_illegal_compstat_m(),
-			ltc_ltcs_ltss_intr_en_illegal_compstat_disabled_f());
-	gk20a_writel(g, ltc_ltcs_ltss_intr_r(), val);
-}
-
-
-void gv11b_ltc_isr(struct gk20a *g)
-{
-	u32 mc_intr, ltc_intr3;
-	unsigned int ltc, slice;
-	u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
-	u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
-	u32 ecc_status, ecc_addr, corrected_cnt, uncorrected_cnt;
-	u32 corrected_delta, uncorrected_delta;
-	u32 corrected_overflow, uncorrected_overflow;
-
-	mc_intr = gk20a_readl(g, mc_intr_ltc_r());
-	for (ltc = 0; ltc < g->ltc_count; ltc++) {
-		if ((mc_intr & 1U << ltc) == 0)
-			continue;
-
-		for (slice = 0; slice < g->gr.slices_per_ltc; slice++) {
-			u32 offset = ltc_stride * ltc + lts_stride * slice;
-			ltc_intr3 = gk20a_readl(g, ltc_ltc0_lts0_intr3_r() +
-						offset);
-
-			/* Detect and handle ECC PARITY errors */
-
-			if (ltc_intr3 &
-				(ltc_ltcs_ltss_intr3_ecc_uncorrected_m() |
-				 ltc_ltcs_ltss_intr3_ecc_corrected_m())) {
-
-				ecc_status = gk20a_readl(g,
-					ltc_ltc0_lts0_l2_cache_ecc_status_r() +
-					offset);
-				ecc_addr = gk20a_readl(g,
-					ltc_ltc0_lts0_l2_cache_ecc_address_r() +
-					offset);
-				corrected_cnt = gk20a_readl(g,
-					ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r() + offset);
-				uncorrected_cnt = gk20a_readl(g,
-					ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r() + offset);
-
-				corrected_delta =
-					ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_v(corrected_cnt);
-				uncorrected_delta =
-					ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_v(uncorrected_cnt);
-				corrected_overflow = ecc_status &
-					ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_total_counter_overflow_m();
-
-				uncorrected_overflow = ecc_status &
-					ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_total_counter_overflow_m();
-
-				/* clear the interrupt */
-				if ((corrected_delta > 0U) || corrected_overflow) {
-					nvgpu_writel_check(g,
-						ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r() + offset, 0);
-				}
-				if ((uncorrected_delta > 0U) || uncorrected_overflow) {
-					nvgpu_writel_check(g,
-						ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r() + offset, 0);
-				}
-
-				nvgpu_writel_check(g,
-					ltc_ltc0_lts0_l2_cache_ecc_status_r() + offset,
-					ltc_ltc0_lts0_l2_cache_ecc_status_reset_task_f());
-
-				/* update counters per slice */
-				if (corrected_overflow)
-					corrected_delta += (0x1U << ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_s());
-				if (uncorrected_overflow)
-					uncorrected_delta += (0x1U << ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_s());
-
-				g->ecc.ltc.ecc_sec_count[ltc][slice].counter += corrected_delta;
-				g->ecc.ltc.ecc_ded_count[ltc][slice].counter += uncorrected_delta;
-				nvgpu_log(g, gpu_dbg_intr,
-					"ltc:%d lts: %d cache ecc interrupt intr: 0x%x", ltc, slice, ltc_intr3);
-
-				if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_rstg_m())
-					nvgpu_log(g, gpu_dbg_intr, "rstg ecc error corrected");
-				if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_rstg_m())
-					nvgpu_log(g, gpu_dbg_intr, "rstg ecc error uncorrected");
-				if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_tstg_m())
-					nvgpu_log(g, gpu_dbg_intr, "tstg ecc error corrected");
-				if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_tstg_m())
-					nvgpu_log(g, gpu_dbg_intr, "tstg ecc error uncorrected");
-				if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_dstg_m())
-					nvgpu_log(g, gpu_dbg_intr, "dstg ecc error corrected");
-				if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_dstg_m())
-					nvgpu_log(g, gpu_dbg_intr, "dstg ecc error uncorrected");
-
-				if (corrected_overflow || uncorrected_overflow)
-					nvgpu_info(g, "ecc counter overflow!");
-
-				nvgpu_log(g, gpu_dbg_intr,
-					"ecc error address: 0x%x", ecc_addr);
-
-			}
-
-		}
-
-	}
-
-	/* fallback to other interrupts */
-	gp10b_ltc_isr(g);
-}
diff --git a/drivers/gpu/nvgpu/gv11b/ltc_gv11b.h b/drivers/gpu/nvgpu/gv11b/ltc_gv11b.h
deleted file mode 100644
index 9d33b9fb..00000000
--- a/drivers/gpu/nvgpu/gv11b/ltc_gv11b.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef LTC_GV11B_H
-#define LTC_GV11B_H
-struct gk20a;
-
-void gv11b_ltc_set_zbc_stencil_entry(struct gk20a *g,
-					 struct zbc_entry *stencil_val,
-					 u32 index);
-void gv11b_ltc_init_fs_state(struct gk20a *g);
-void gv11b_ltc_intr_en_illegal_compstat(struct gk20a *g, bool enable);
-void gv11b_ltc_isr(struct gk20a *g);
-
-#endif
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_hal_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_hal_gp10b.c
index aadd17d6..a4ad64a8 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_hal_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_hal_gp10b.c
@@ -30,6 +30,8 @@
 #include "common/fb/fb_gp10b.h"
 #include "common/therm/therm_gm20b.h"
 #include "common/therm/therm_gp10b.h"
+#include "common/ltc/ltc_gm20b.h"
+#include "common/ltc/ltc_gp10b.h"
 
 #include "vgpu/fifo_vgpu.h"
 #include "vgpu/gr_vgpu.h"
@@ -50,7 +52,6 @@
 #include "gk20a/dbg_gpu_gk20a.h"
 
 #include "gp10b/mc_gp10b.h"
-#include "gp10b/ltc_gp10b.h"
 #include "gp10b/mm_gp10b.h"
 #include "gp10b/ce_gp10b.h"
 #include "gp10b/pmu_gp10b.h"
@@ -60,7 +61,6 @@
 #include "gp10b/regops_gp10b.h"
 #include "gp10b/fuse_gp10b.h"
 
-#include "gm20b/ltc_gm20b.h"
 #include "gm20b/gr_gm20b.h"
 #include "gm20b/fifo_gm20b.h"
 #include "gm20b/acr_gm20b.h"
@@ -91,6 +91,11 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 		.cbc_fix_config = gm20b_ltc_cbc_fix_config,
 		.flush = gm20b_flush_ltc,
 		.set_enabled = NULL,
+		.pri_is_ltc_addr = gm20b_ltc_pri_is_ltc_addr,
+		.is_ltcs_ltss_addr = gm20b_ltc_is_ltcs_ltss_addr,
+		.is_ltcn_ltss_addr = gm20b_ltc_is_ltcn_ltss_addr,
+		.split_lts_broadcast_addr = gm20b_ltc_split_lts_broadcast_addr,
+		.split_ltc_broadcast_addr = gm20b_ltc_split_ltc_broadcast_addr,
 	},
 	.ce2 = {
 		.isr_stall = gp10b_ce_isr,
@@ -173,10 +178,6 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 		.init_sm_id_table = vgpu_gr_init_sm_id_table,
 		.load_smid_config = gr_gp10b_load_smid_config,
 		.program_sm_id_numbering = gr_gm20b_program_sm_id_numbering,
-		.is_ltcs_ltss_addr = gr_gm20b_is_ltcs_ltss_addr,
-		.is_ltcn_ltss_addr = gr_gm20b_is_ltcn_ltss_addr,
-		.split_lts_broadcast_addr = gr_gm20b_split_lts_broadcast_addr,
-		.split_ltc_broadcast_addr = gr_gm20b_split_ltc_broadcast_addr,
 		.setup_rop_mapping = gr_gk20a_setup_rop_mapping,
 		.program_zcull_mapping = gr_gk20a_program_zcull_mapping,
 		.commit_global_timeslice = gr_gk20a_commit_global_timeslice,
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_hal_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_hal_gv11b.c
index e2410e4e..a02c47f2 100644
--- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_hal_gv11b.c
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_hal_gv11b.c
@@ -32,6 +32,9 @@
 #include "common/therm/therm_gm20b.h"
 #include "common/therm/therm_gp10b.h"
 #include "common/therm/therm_gv11b.h"
+#include "common/ltc/ltc_gm20b.h"
+#include "common/ltc/ltc_gp10b.h"
+#include "common/ltc/ltc_gv11b.h"
 
 #include 
 #include 
@@ -58,7 +61,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 
@@ -66,7 +68,6 @@
 #include 
 #include "gp10b/gr_gp10b.h"
 #include 
-#include 
 #include 
 #include 
 
@@ -80,7 +81,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 
@@ -110,6 +110,11 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 		.isr = gv11b_ltc_isr,
 		.flush = gm20b_flush_ltc,
 		.set_enabled = NULL,
+		.pri_is_ltc_addr = gm20b_ltc_pri_is_ltc_addr,
+		.is_ltcs_ltss_addr = gm20b_ltc_is_ltcs_ltss_addr,
+		.is_ltcn_ltss_addr = gm20b_ltc_is_ltcn_ltss_addr,
+		.split_lts_broadcast_addr = gm20b_ltc_split_lts_broadcast_addr,
+		.split_ltc_broadcast_addr = gm20b_ltc_split_ltc_broadcast_addr,
 	},
 	.ce2 = {
 		.isr_stall = gv11b_ce_isr,
@@ -191,10 +196,6 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 		.init_sm_id_table = vgpu_gr_init_sm_id_table,
 		.load_smid_config = gr_gv11b_load_smid_config,
 		.program_sm_id_numbering = gr_gv11b_program_sm_id_numbering,
-		.is_ltcs_ltss_addr = gr_gm20b_is_ltcs_ltss_addr,
-		.is_ltcn_ltss_addr = gr_gm20b_is_ltcn_ltss_addr,
-		.split_lts_broadcast_addr = gr_gm20b_split_lts_broadcast_addr,
-		.split_ltc_broadcast_addr = gr_gm20b_split_ltc_broadcast_addr,
 		.setup_rop_mapping = gr_gv11b_setup_rop_mapping,
 		.program_zcull_mapping = gr_gv11b_program_zcull_mapping,
 		.commit_global_timeslice = gr_gv11b_commit_global_timeslice,
-- 
cgit v1.2.2
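For reference, the effect of gm20b_ltc_split_lts_broadcast_addr(), whose implementation this patch moves into common/ltc/ltc_gm20b.c, can be sketched as follows: one broadcast register offset is expanded into one unicast offset per (LTC, LTS) pair. The base and strides below are invented for the example; the driver takes the real values from litter values and the generated hw headers.

#include <stdio.h>

int main(void)
{
	unsigned ltc0_lts0_base = 0x140400;  /* assumed unicast base    */
	unsigned ltc_stride = 0x2000;        /* assumed per-LTC stride  */
	unsigned lts_stride = 0x400;         /* assumed per-LTS stride  */
	unsigned num_ltc = 2, slices_per_ltc = 2;
	unsigned addr = 0x17e270;            /* some broadcast offset   */

	/* Keep only the register offset within one LTS, as the driver
	 * does with (addr & (lts_stride - 1)). */
	unsigned reg_in_lts = addr & (lts_stride - 1);

	for (unsigned ltc = 0; ltc < num_ltc; ltc++)
		for (unsigned lts = 0; lts < slices_per_ltc; lts++)
			printf("0x%x\n", ltc0_lts0_base +
			       ltc * ltc_stride + lts * lts_stride + reg_in_lts);
	return 0;
}

Each printed address targets exactly one slice, which is what the priv_addr_table built in gr_gv11b_create_priv_addr_table() ultimately holds.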