From 974d541623929fa2622d27d5d338a5b63596794b Mon Sep 17 00:00:00 2001
From: Terje Bergstrom
Date: Mon, 13 Aug 2018 12:58:18 -0700
Subject: gpu: nvgpu: Move ltc HAL to common

Move implementation of ltc HAL to common/ltc.

JIRA NVGPU-956

Change-Id: Id78d74e8612d7dacfb8d322d491abecd798e42b5
Signed-off-by: Terje Bergstrom
Reviewed-on: https://git-master.nvidia.com/r/1798461
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/gm20b/gr_gm20b.c  |  76 ------
 drivers/gpu/nvgpu/gm20b/gr_gm20b.h  |   8 -
 drivers/gpu/nvgpu/gm20b/hal_gm20b.c |  12 +-
 drivers/gpu/nvgpu/gm20b/ltc_gm20b.c | 489 ------------------------------------
 drivers/gpu/nvgpu/gm20b/ltc_gm20b.h |  49 ----
 5 files changed, 6 insertions(+), 628 deletions(-)
 delete mode 100644 drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
 delete mode 100644 drivers/gpu/nvgpu/gm20b/ltc_gm20b.h

(limited to 'drivers/gpu/nvgpu/gm20b')

diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
index 49b81783..abc39362 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
@@ -39,7 +39,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -1438,81 +1437,6 @@ int gr_gm20b_get_preemption_mode_flags(struct gk20a *g,
 	return 0;
 }
 
-bool gr_gm20b_is_ltcs_ltss_addr(struct gk20a *g, u32 addr)
-{
-	u32 ltc_shared_base = ltc_ltcs_ltss_v();
-	u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
-
-	return (addr >= ltc_shared_base) &&
-		(addr < (ltc_shared_base + lts_stride));
-}
-
-bool gr_gm20b_is_ltcn_ltss_addr(struct gk20a *g, u32 addr)
-{
-	u32 lts_shared_base = ltc_ltc0_ltss_v();
-	u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
-	u32 addr_mask = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE) - 1;
-	u32 base_offset = lts_shared_base & addr_mask;
-	u32 end_offset = base_offset + lts_stride;
-
-	return (!gr_gm20b_is_ltcs_ltss_addr(g, addr)) &&
-		((addr & addr_mask) >= base_offset) &&
-		((addr & addr_mask) < end_offset);
-}
-
-static void gr_gm20b_update_ltc_lts_addr(struct gk20a *g, u32 addr, u32 ltc_num,
-					u32 *priv_addr_table,
-					u32 *priv_addr_table_index)
-{
-	u32 num_ltc_slices = g->ops.gr.get_max_lts_per_ltc(g);
-	u32 index = *priv_addr_table_index;
-	u32 lts_num;
-	u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
-	u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
-
-	for (lts_num = 0; lts_num < num_ltc_slices; lts_num++) {
-		priv_addr_table[index++] = ltc_ltc0_lts0_v() +
-					ltc_num * ltc_stride +
-					lts_num * lts_stride +
-					(addr & (lts_stride - 1));
-	}
-
-	*priv_addr_table_index = index;
-}
-
-void gr_gm20b_split_lts_broadcast_addr(struct gk20a *g, u32 addr,
-					u32 *priv_addr_table,
-					u32 *priv_addr_table_index)
-{
-	u32 num_ltc = g->ltc_count;
-	u32 i, start, ltc_num = 0;
-	u32 pltcg_base = ltc_pltcg_base_v();
-	u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
-
-	for (i = 0; i < num_ltc; i++) {
-		start = pltcg_base + i * ltc_stride;
-		if ((addr >= start) && (addr < (start + ltc_stride))) {
-			ltc_num = i;
-			break;
-		}
-	}
-	gr_gm20b_update_ltc_lts_addr(g, addr, ltc_num, priv_addr_table,
-					priv_addr_table_index);
-}
-
-void gr_gm20b_split_ltc_broadcast_addr(struct gk20a *g, u32 addr,
-					u32 *priv_addr_table,
-					u32 *priv_addr_table_index)
-{
-	u32 num_ltc = g->ltc_count;
-	u32 ltc_num;
-
-	for (ltc_num = 0; ltc_num < num_ltc; ltc_num++) {
-		gr_gm20b_update_ltc_lts_addr(g, addr, ltc_num,
-					priv_addr_table, priv_addr_table_index);
-	}
-}
-
 void gm20b_gr_clear_sm_hww(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
 			u32 global_esr)
 {
diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.h b/drivers/gpu/nvgpu/gm20b/gr_gm20b.h
index 5c82fd65..9d8e5cdf 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.h
+++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.h
@@ -124,14 +124,6 @@ int gm20b_gr_clear_sm_error_state(struct gk20a *g,
 		struct channel_gk20a *ch, u32 sm_id);
 int gr_gm20b_get_preemption_mode_flags(struct gk20a *g,
 		struct nvgpu_preemption_modes_rec *preemption_modes_rec);
-bool gr_gm20b_is_ltcs_ltss_addr(struct gk20a *g, u32 addr);
-bool gr_gm20b_is_ltcn_ltss_addr(struct gk20a *g, u32 addr);
-void gr_gm20b_split_lts_broadcast_addr(struct gk20a *g, u32 addr,
-				u32 *priv_addr_table,
-				u32 *priv_addr_table_index);
-void gr_gm20b_split_ltc_broadcast_addr(struct gk20a *g, u32 addr,
-				u32 *priv_addr_table,
-				u32 *priv_addr_table_index);
 void gm20b_gr_clear_sm_hww(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
 			u32 global_esr);
 u32 gr_gm20b_get_pmm_per_chiplet_offset(void);
diff --git a/drivers/gpu/nvgpu/gm20b/hal_gm20b.c b/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
index 05acc0cf..3b164f9c 100644
--- a/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
@@ -31,6 +31,7 @@
 #include "common/fb/fb_gm20b.h"
 #include "common/therm/therm_gm20b.h"
 #include "common/therm/therm_gm20b.h"
+#include "common/ltc/ltc_gm20b.h"
 
 #include "gk20a/gk20a.h"
 #include "gk20a/ce2_gk20a.h"
@@ -45,9 +46,7 @@
 #include "gk20a/gr_gk20a.h"
 #include "gk20a/tsg_gk20a.h"
 
-#include "ltc_gm20b.h"
 #include "gr_gm20b.h"
-#include "ltc_gm20b.h"
 #include "fifo_gm20b.h"
 #include "gr_ctx_gm20b.h"
 #include "mm_gm20b.h"
@@ -200,6 +199,11 @@ static const struct gpu_ops gm20b_ops = {
 		.cbc_fix_config = gm20b_ltc_cbc_fix_config,
 		.flush = gm20b_flush_ltc,
 		.set_enabled = gm20b_ltc_set_enabled,
+		.pri_is_ltc_addr = gm20b_ltc_pri_is_ltc_addr,
+		.is_ltcs_ltss_addr = gm20b_ltc_is_ltcs_ltss_addr,
+		.is_ltcn_ltss_addr = gm20b_ltc_is_ltcn_ltss_addr,
+		.split_lts_broadcast_addr = gm20b_ltc_split_lts_broadcast_addr,
+		.split_ltc_broadcast_addr = gm20b_ltc_split_ltc_broadcast_addr,
 	},
 	.ce2 = {
 		.isr_stall = gk20a_ce2_isr,
@@ -281,10 +285,6 @@ static const struct gpu_ops gm20b_ops = {
 		.init_sm_id_table = gr_gk20a_init_sm_id_table,
 		.load_smid_config = gr_gm20b_load_smid_config,
 		.program_sm_id_numbering = gr_gm20b_program_sm_id_numbering,
-		.is_ltcs_ltss_addr = gr_gm20b_is_ltcs_ltss_addr,
-		.is_ltcn_ltss_addr = gr_gm20b_is_ltcn_ltss_addr,
-		.split_lts_broadcast_addr = gr_gm20b_split_lts_broadcast_addr,
-		.split_ltc_broadcast_addr = gr_gm20b_split_ltc_broadcast_addr,
 		.setup_rop_mapping = gr_gk20a_setup_rop_mapping,
 		.program_zcull_mapping = gr_gk20a_program_zcull_mapping,
 		.commit_global_timeslice = gr_gk20a_commit_global_timeslice,
diff --git a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
deleted file mode 100644
index 65945fad..00000000
--- a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
+++ /dev/null
@@ -1,489 +0,0 @@
-/*
- * GM20B L2
- *
- * Copyright (c) 2014-2018 NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
-#include
-#include
-#include
-
-#include "gk20a/gk20a.h"
-
-#include "ltc_gm20b.h"
-
-int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
-{
-	/* max memory size (MB) to cover */
-	u32 max_size = gr->max_comptag_mem;
-	/* one tag line covers 128KB */
-	u32 max_comptag_lines = max_size << 3U;
-
-	u32 hw_max_comptag_lines =
-		ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_init_v();
-
-	u32 cbc_param =
-		gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r());
-	u32 comptags_per_cacheline =
-		ltc_ltcs_ltss_cbc_param_comptags_per_cache_line_v(cbc_param);
-
-	u32 compbit_backing_size;
-
-	int err;
-
-	nvgpu_log_fn(g, " ");
-
-	if (max_comptag_lines == 0U)
-		return 0;
-
-	if (max_comptag_lines > hw_max_comptag_lines)
-		max_comptag_lines = hw_max_comptag_lines;
-
-	compbit_backing_size =
-		DIV_ROUND_UP(max_comptag_lines, comptags_per_cacheline) *
-		gr->cacheline_size * gr->slices_per_ltc * g->ltc_count;
-
-	/* aligned to 2KB * ltc_count */
-	compbit_backing_size +=
-		g->ltc_count << ltc_ltcs_ltss_cbc_base_alignment_shift_v();
-
-	/* must be a multiple of 64KB */
-	compbit_backing_size = roundup(compbit_backing_size, 64*1024);
-
-	max_comptag_lines =
-		(compbit_backing_size * comptags_per_cacheline) /
-		(gr->cacheline_size * gr->slices_per_ltc * g->ltc_count);
-
-	if (max_comptag_lines > hw_max_comptag_lines)
-		max_comptag_lines = hw_max_comptag_lines;
-
-	nvgpu_log_info(g, "compbit backing store size : %d",
-		compbit_backing_size);
-	nvgpu_log_info(g, "max comptag lines : %d",
-		max_comptag_lines);
-
-	err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size);
-	if (err)
-		return err;
-
-	err = gk20a_comptag_allocator_init(g, &gr->comp_tags, max_comptag_lines);
-	if (err)
-		return err;
-
-	gr->max_comptag_lines = max_comptag_lines;
-	gr->comptags_per_cacheline = comptags_per_cacheline;
-
-	return 0;
-}
-
-int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
-		       u32 min, u32 max)
-{
-	struct gr_gk20a *gr = &g->gr;
-	struct nvgpu_timeout timeout;
-	int err = 0;
-	u32 ltc, slice, ctrl1, val, hw_op = 0U;
-	u32 slices_per_ltc = ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(
-				gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r()));
-	u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
-	u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
-	const u32 max_lines = 16384U;
-
-	nvgpu_log_fn(g, " ");
-	trace_gk20a_ltc_cbc_ctrl_start(g->name, op, min, max);
-
-	if (gr->compbit_store.mem.size == 0)
-		return 0;
-
-	while (1) {
-		const u32 iter_max = min(min + max_lines - 1, max);
-		bool full_cache_op = true;
-
-		nvgpu_mutex_acquire(&g->mm.l2_op_lock);
-
-		nvgpu_log_info(g, "clearing CBC lines %u..%u", min, iter_max);
-
-		if (op == gk20a_cbc_op_clear) {
-			gk20a_writel(
-				g, ltc_ltcs_ltss_cbc_ctrl2_r(),
-				ltc_ltcs_ltss_cbc_ctrl2_clear_lower_bound_f(
-					min));
-			gk20a_writel(
-				g, ltc_ltcs_ltss_cbc_ctrl3_r(),
-				ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_f(
-					iter_max));
-			hw_op = ltc_ltcs_ltss_cbc_ctrl1_clear_active_f();
-			full_cache_op = false;
-		} else if (op == gk20a_cbc_op_clean) {
-			/* this is full-cache op */
-			hw_op = ltc_ltcs_ltss_cbc_ctrl1_clean_active_f();
-		} else if (op == gk20a_cbc_op_invalidate) {
-			/* this is full-cache op */
-			hw_op = ltc_ltcs_ltss_cbc_ctrl1_invalidate_active_f();
-		} else {
-			nvgpu_err(g, "Unknown op: %u", (unsigned)op);
-			err = -EINVAL;
-			goto out;
-		}
-		gk20a_writel(g, ltc_ltcs_ltss_cbc_ctrl1_r(),
-			     gk20a_readl(g,
-					 ltc_ltcs_ltss_cbc_ctrl1_r()) | hw_op);
-
-		for (ltc = 0; ltc < g->ltc_count; ltc++) {
-			for (slice = 0; slice < slices_per_ltc; slice++) {
-
-				ctrl1 = ltc_ltc0_lts0_cbc_ctrl1_r() +
-					ltc * ltc_stride + slice * lts_stride;
-
-				nvgpu_timeout_init(g, &timeout, 2000,
-						   NVGPU_TIMER_RETRY_TIMER);
-				do {
-					val = gk20a_readl(g, ctrl1);
-					if (!(val & hw_op))
-						break;
-					nvgpu_udelay(5);
-				} while (!nvgpu_timeout_expired(&timeout));
-
-				if (nvgpu_timeout_peek_expired(&timeout)) {
-					nvgpu_err(g, "comp tag clear timeout");
-					err = -EBUSY;
-					goto out;
-				}
-			}
-		}
-
-		/* are we done? */
-		if (full_cache_op || iter_max == max)
-			break;
-
-		/* note: iter_max is inclusive upper bound */
-		min = iter_max + 1;
-
-		/* give a chance for higher-priority threads to progress */
-		nvgpu_mutex_release(&g->mm.l2_op_lock);
-	}
-out:
-	trace_gk20a_ltc_cbc_ctrl_done(g->name);
-	nvgpu_mutex_release(&g->mm.l2_op_lock);
-	return err;
-}
-
-void gm20b_ltc_init_fs_state(struct gk20a *g)
-{
-	struct gr_gk20a *gr = &g->gr;
-	u32 reg;
-
-	nvgpu_log_info(g, "initialize gm20b l2");
-
-	g->max_ltc_count = gk20a_readl(g, top_num_ltcs_r());
-	g->ltc_count = gk20a_readl(g, pri_ringmaster_enum_ltc_r());
-	nvgpu_log_info(g, "%d ltcs out of %d", g->ltc_count, g->max_ltc_count);
-
-	reg = gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r());
-	gr->slices_per_ltc = ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(reg);;
-	gr->cacheline_size =
-		512U << ltc_ltcs_ltss_cbc_param_cache_line_size_v(reg);
-
-	gk20a_writel(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r(),
-	g->ltc_count);
-	gk20a_writel(g, ltc_ltcs_misc_ltc_num_active_ltcs_r(),
-	g->ltc_count);
-
-	gk20a_writel(g, ltc_ltcs_ltss_dstg_cfg0_r(),
-		gk20a_readl(g, ltc_ltc0_lts0_dstg_cfg0_r()) |
-		ltc_ltcs_ltss_dstg_cfg0_vdc_4to2_disable_m());
-
-	/* Disable LTC interrupts */
-	reg = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
-	reg &= ~ltc_ltcs_ltss_intr_en_evicted_cb_m();
-	reg &= ~ltc_ltcs_ltss_intr_en_illegal_compstat_access_m();
-	reg &= ~ltc_ltcs_ltss_intr_en_illegal_compstat_m();
-	gk20a_writel(g, ltc_ltcs_ltss_intr_r(), reg);
-}
-
-void gm20b_ltc_isr(struct gk20a *g)
-{
-	u32 mc_intr, ltc_intr;
-	unsigned int ltc, slice;
-	u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
-	u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
-
-	mc_intr = gk20a_readl(g, mc_intr_ltc_r());
-	nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr);
-	for (ltc = 0; ltc < g->ltc_count; ltc++) {
-		if ((mc_intr & 1U << ltc) == 0)
-			continue;
-		for (slice = 0; slice < g->gr.slices_per_ltc; slice++) {
-			ltc_intr = gk20a_readl(g, ltc_ltc0_lts0_intr_r() +
-					ltc_stride * ltc +
-					lts_stride * slice);
-			nvgpu_err(g, "ltc%d, slice %d: %08x",
-				ltc, slice, ltc_intr);
-			gk20a_writel(g, ltc_ltc0_lts0_intr_r() +
-				ltc_stride * ltc +
-				lts_stride * slice,
-				ltc_intr);
-		}
-	}
-}
-
-u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base)
-{
-	u32 val = gk20a_readl(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r());
-	if (val == 2U) {
-		return base * 2;
-	} else if (val != 1) {
-		nvgpu_err(g, "Invalid number of active ltcs: %08x", val);
-	}
-
-	return base;
-}
-
-/*
- * Performs a full flush of the L2 cache.
- */
-void gm20b_flush_ltc(struct gk20a *g)
-{
-	struct nvgpu_timeout timeout;
-	unsigned int ltc;
-	u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
-
-	/* Clean... */
-	nvgpu_writel_check(g, ltc_ltcs_ltss_tstg_cmgmt1_r(),
-		ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_f() |
-		ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_f() |
-		ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_f() |
-		ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_f() |
-		ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_f() |
-		ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_f());
-
-	/* Wait on each LTC individually. */
-	for (ltc = 0; ltc < g->ltc_count; ltc++) {
-		u32 op_pending;
-
-		/*
-		 * Use 5ms - this should be sufficient time to flush the cache.
-		 * On tegra, rough EMC BW available for old tegra chips (newer
-		 * chips are strictly faster) can be estimated as follows:
-		 *
-		 * Lowest reasonable EMC clock speed will be around 102MHz on
-		 * t124 for display enabled boards and generally fixed to max
-		 * for non-display boards (since they are generally plugged in).
-		 *
-		 * Thus, the available BW is 64b * 2 * 102MHz = 1.3GB/s. Of that
-		 * BW the GPU will likely get about half (display and overhead/
-		 * utilization inefficiency eating the rest) so 650MB/s at
-		 * worst. Assuming at most 1MB of GPU L2 cache (less for most
-		 * chips) worst case is we take 1MB/650MB/s = 1.5ms.
-		 *
-		 * So 5ms timeout here should be more than sufficient.
-		 */
-		nvgpu_timeout_init(g, &timeout, 5, NVGPU_TIMER_CPU_TIMER);
-
-		do {
-			int cmgmt1 = ltc_ltc0_ltss_tstg_cmgmt1_r() +
-				ltc * ltc_stride;
-			op_pending = gk20a_readl(g, cmgmt1);
-		} while ((op_pending &
-			ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_f()) &&
-			!nvgpu_timeout_expired_msg(&timeout,
-				"L2 flush timeout!"));
-	}
-
-	/* And invalidate. */
-	nvgpu_writel_check(g, ltc_ltcs_ltss_tstg_cmgmt0_r(),
-		ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_f() |
-		ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_f() |
-		ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_f() |
-		ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_f() |
-		ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_f());
-
-	/* Wait on each LTC individually. */
-	for (ltc = 0; ltc < g->ltc_count; ltc++) {
-		u32 op_pending;
-
-		/* Again, 5ms. */
-		nvgpu_timeout_init(g, &timeout, 5, NVGPU_TIMER_CPU_TIMER);
-
-		do {
-			int cmgmt0 = ltc_ltc0_ltss_tstg_cmgmt0_r() +
-				ltc * ltc_stride;
-			op_pending = gk20a_readl(g, cmgmt0);
-		} while ((op_pending &
-			ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_f()) &&
-			!nvgpu_timeout_expired_msg(&timeout,
-				"L2 flush timeout!"));
-	}
-}
-
-int gm20b_determine_L2_size_bytes(struct gk20a *g)
-{
-	u32 lts_per_ltc;
-	u32 ways;
-	u32 sets;
-	u32 bytes_per_line;
-	u32 active_ltcs;
-	u32 cache_size;
-
-	u32 tmp;
-	u32 active_sets_value;
-
-	tmp = gk20a_readl(g, ltc_ltc0_lts0_tstg_cfg1_r());
-	ways = hweight32(ltc_ltc0_lts0_tstg_cfg1_active_ways_v(tmp));
-
-	active_sets_value = ltc_ltc0_lts0_tstg_cfg1_active_sets_v(tmp);
-	if (active_sets_value == ltc_ltc0_lts0_tstg_cfg1_active_sets_all_v()) {
-		sets = 64U;
-	} else if (active_sets_value ==
-		   ltc_ltc0_lts0_tstg_cfg1_active_sets_half_v()) {
-		sets = 32U;
-	} else if (active_sets_value ==
-		   ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v()) {
-		sets = 16U;
-	} else {
-		nvgpu_err(g, "Unknown constant %u for active sets",
-			(unsigned)active_sets_value);
-		sets = 0U;
-	}
-
-	active_ltcs = g->gr.num_fbps;
-
-	/* chip-specific values */
-	lts_per_ltc = 2U;
-	bytes_per_line = 128U;
-	cache_size = active_ltcs * lts_per_ltc * ways * sets * bytes_per_line;
-
-	return cache_size;
-}
-
-/*
- * Sets the ZBC color for the passed index.
- */
-void gm20b_ltc_set_zbc_color_entry(struct gk20a *g,
-				   struct zbc_entry *color_val,
-				   u32 index)
-{
-	u32 i;
-	u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;
-
-	nvgpu_writel_check(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
-		ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));
-
-	for (i = 0;
-	     i < ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(); i++) {
-		nvgpu_writel_check(g,
-			ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(i),
-			color_val->color_l2[i]);
-	}
-}
-
-/*
- * Sets the ZBC depth for the passed index.
- */
-void gm20b_ltc_set_zbc_depth_entry(struct gk20a *g,
-				   struct zbc_entry *depth_val,
-				   u32 index)
-{
-	u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;
-
-	nvgpu_writel_check(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
-		ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));
-
-	nvgpu_writel_check(g,
-			ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(),
-			depth_val->depth);
-}
-
-void gm20b_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
-{
-	u32 max_size = gr->max_comptag_mem;
-	u32 max_comptag_lines = max_size << 3U;
-
-	u32 compbit_base_post_divide;
-	u64 compbit_base_post_multiply64;
-	u64 compbit_store_iova;
-	u64 compbit_base_post_divide64;
-
-	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
-		compbit_store_iova = nvgpu_mem_get_phys_addr(g,
-				&gr->compbit_store.mem);
-	else
-		compbit_store_iova = nvgpu_mem_get_addr(g,
-				&gr->compbit_store.mem);
-
-	compbit_base_post_divide64 = compbit_store_iova >>
-		ltc_ltcs_ltss_cbc_base_alignment_shift_v();
-
-	do_div(compbit_base_post_divide64, g->ltc_count);
-	compbit_base_post_divide = u64_lo32(compbit_base_post_divide64);
-
-	compbit_base_post_multiply64 = ((u64)compbit_base_post_divide *
-		g->ltc_count) << ltc_ltcs_ltss_cbc_base_alignment_shift_v();
-
-	if (compbit_base_post_multiply64 < compbit_store_iova)
-		compbit_base_post_divide++;
-
-	/* Bug 1477079 indicates sw adjustment on the posted divided base. */
-	if (g->ops.ltc.cbc_fix_config)
-		compbit_base_post_divide =
-			g->ops.ltc.cbc_fix_config(g, compbit_base_post_divide);
-
-	gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(),
-		compbit_base_post_divide);
-
-	nvgpu_log(g, gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte,
-		"compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n",
-		(u32)(compbit_store_iova >> 32),
-		(u32)(compbit_store_iova & 0xffffffff),
-		compbit_base_post_divide);
-
-	gr->compbit_store.base_hw = compbit_base_post_divide;
-
-	g->ops.ltc.cbc_ctrl(g, gk20a_cbc_op_invalidate,
-			0, max_comptag_lines - 1);
-
-}
-
-void gm20b_ltc_set_enabled(struct gk20a *g, bool enabled)
-{
-	u32 reg_f = ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f();
-	u32 reg = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r());
-
-	if (enabled)
-		/* bypass disabled (normal caching ops)*/
-		reg &= ~reg_f;
-	else
-		/* bypass enabled (no caching) */
-		reg |= reg_f;
-
-	gk20a_writel(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r(), reg);
-}
diff --git a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.h b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.h
deleted file mode 100644
index 0f9145be..00000000
--- a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * GM20B L2
- *
- * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _NVHOST_GM20B_LTC
-#define _NVHOST_GM20B_LTC
-struct gpu_ops;
-
-int gm20b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr);
-int gm20b_determine_L2_size_bytes(struct gk20a *g);
-void gm20b_ltc_set_zbc_color_entry(struct gk20a *g,
-				   struct zbc_entry *color_val,
-				   u32 index);
-void gm20b_ltc_set_zbc_depth_entry(struct gk20a *g,
-				   struct zbc_entry *depth_val,
-				   u32 index);
-void gm20b_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr);
-void gm20b_ltc_set_enabled(struct gk20a *g, bool enabled);
-void gm20b_ltc_init_fs_state(struct gk20a *g);
-int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
-		       u32 min, u32 max);
-void gm20b_ltc_isr(struct gk20a *g);
-u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base);
-void gm20b_flush_ltc(struct gk20a *g);
-int gm20b_ltc_alloc_phys_cbc(struct gk20a *g,
-			     size_t compbit_backing_size);
-int gm20b_ltc_alloc_virt_cbc(struct gk20a *g,
-			     size_t compbit_backing_size);
-#endif
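The broadcast-address helpers this patch relocates implement a small, self-contained calculation: a PRI offset that falls in an LTC/LTS broadcast window is rewritten into one unicast address per LTC/slice pair, using the LTC and LTS strides to step between units. The standalone sketch below illustrates that expansion outside the driver. The register bases and strides are illustrative placeholder values, not the generated hw_ltc_gm20b.h constants (in nvgpu they come from ltc_*_v() accessors and nvgpu_get_litter_value()), and main() is hypothetical test scaffolding rather than driver code.

/* Minimal sketch of LTC/LTS broadcast-to-unicast address expansion. */
#include <stdio.h>
#include <stdint.h>

#define LTC0_LTS0_BASE  0x00140400u  /* assumed ltc_ltc0_lts0_v()  */
#define LTC0_LTSS_BASE  0x00140200u  /* assumed ltc_ltc0_ltss_v()  */
#define LTCS_LTSS_BASE  0x0017e200u  /* assumed ltc_ltcs_ltss_v()  */
#define LTC_STRIDE      0x2000u      /* assumed GPU_LIT_LTC_STRIDE */
#define LTS_STRIDE      0x400u       /* assumed GPU_LIT_LTS_STRIDE */

/* Window that broadcasts to every LTC and every slice (LTCS/LTSS). */
static int is_ltcs_ltss_addr(uint32_t addr)
{
	return addr >= LTCS_LTSS_BASE &&
	       addr < LTCS_LTSS_BASE + LTS_STRIDE;
}

/* Window that targets one LTC but broadcasts across its slices. */
static int is_ltcn_ltss_addr(uint32_t addr)
{
	uint32_t mask = LTC_STRIDE - 1u;
	uint32_t base = LTC0_LTSS_BASE & mask;

	return !is_ltcs_ltss_addr(addr) &&
	       (addr & mask) >= base &&
	       (addr & mask) < base + LTS_STRIDE;
}

/* Expand one LTC's slice-broadcast offset into per-slice unicast addrs. */
static void update_ltc_lts_addr(uint32_t addr, uint32_t ltc,
				uint32_t slices, uint32_t *table, uint32_t *n)
{
	for (uint32_t lts = 0; lts < slices; lts++)
		table[(*n)++] = LTC0_LTS0_BASE +
				ltc * LTC_STRIDE + lts * LTS_STRIDE +
				(addr & (LTS_STRIDE - 1u));
}

int main(void)
{
	uint32_t table[8];
	uint32_t n = 0;

	/* An offset inside the all-LTC/all-LTS broadcast window. */
	uint32_t bcast = LTCS_LTSS_BASE + 0x20u;

	if (is_ltcs_ltss_addr(bcast) && !is_ltcn_ltss_addr(bcast)) {
		/* Expand to unicast for a hypothetical 2 LTCs x 2 slices. */
		for (uint32_t ltc = 0; ltc < 2u; ltc++)
			update_ltc_lts_addr(bcast, ltc, 2u, table, &n);
	}

	for (uint32_t i = 0; i < n; i++)
		printf("0x%08x\n", (unsigned)table[i]);
	return 0;
}

The patch itself does not change this logic; it only moves it. The helpers leave gops.gr and reappear under gops.ltc as gm20b_ltc_* in common/ltc, so callers now resolve them through the LTC HAL rather than the GR HAL.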