From a9785995d5f22aaeb659285f8aeb64d8b56982e0 Mon Sep 17 00:00:00 2001
From: Arto Merilainen
Date: Wed, 19 Mar 2014 09:38:25 +0200
Subject: gpu: nvgpu: Add NVIDIA GPU Driver

This patch moves the NVIDIA GPU driver to a new location.

Bug 1482562

Change-Id: I24293810b9d0f1504fd9be00135e21dad656ccb6
Signed-off-by: Arto Merilainen
Reviewed-on: http://git-master/r/383722
Reviewed-by: Terje Bergstrom
---
 drivers/gpu/nvgpu/gk20a/ltc_common.c | 243 +++++++++++++++++++++++++++++++++++
 1 file changed, 243 insertions(+)
 create mode 100644 drivers/gpu/nvgpu/gk20a/ltc_common.c

diff --git a/drivers/gpu/nvgpu/gk20a/ltc_common.c b/drivers/gpu/nvgpu/gk20a/ltc_common.c
new file mode 100644
index 00000000..cbb27cc7
--- /dev/null
+++ b/drivers/gpu/nvgpu/gk20a/ltc_common.c
@@ -0,0 +1,243 @@
+/*
+ * drivers/video/tegra/host/gk20a/ltc_common.c
+ *
+ * GK20A Graphics
+ *
+ * Copyright (c) 2011-2014, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+
+#include "gk20a.h"
+#include "gr_gk20a.h"
+
+static int gk20a_determine_L2_size_bytes(struct gk20a *g)
+{
+	const u32 gpuid = GK20A_GPUID(g->gpu_characteristics.arch,
+				      g->gpu_characteristics.impl);
+	u32 lts_per_ltc;
+	u32 ways;
+	u32 sets;
+	u32 bytes_per_line;
+	u32 active_ltcs;
+	u32 cache_size;
+
+	u32 tmp;
+	u32 active_sets_value;
+
+	tmp = gk20a_readl(g, ltc_ltc0_lts0_tstg_cfg1_r());
+	ways = hweight32(ltc_ltc0_lts0_tstg_cfg1_active_ways_v(tmp));
+
+	active_sets_value = ltc_ltc0_lts0_tstg_cfg1_active_sets_v(tmp);
+	if (active_sets_value == ltc_ltc0_lts0_tstg_cfg1_active_sets_all_v()) {
+		sets = 64;
+	} else if (active_sets_value ==
+			ltc_ltc0_lts0_tstg_cfg1_active_sets_half_v()) {
+		sets = 32;
+	} else if (active_sets_value ==
+			ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v()) {
+		sets = 16;
+	} else {
+		dev_err(dev_from_gk20a(g),
+			"Unknown constant %u for active sets",
+			(unsigned)active_sets_value);
+		sets = 0;
+	}
+
+	active_ltcs = g->gr.num_fbps;
+
+	/* chip-specific values */
+	switch (gpuid) {
+	case GK20A_GPUID_GK20A:
+		lts_per_ltc = 1;
+		bytes_per_line = 128;
+		break;
+
+	default:
+		dev_err(dev_from_gk20a(g), "Unknown GPU id 0x%02x\n",
+			(unsigned)gpuid);
+		lts_per_ltc = 0;
+		bytes_per_line = 0;
+	}
+
+	cache_size = active_ltcs * lts_per_ltc * ways * sets * bytes_per_line;
+
+	return cache_size;
+}
+
+/*
+ * Set the maximum number of ways that can have the "EVICT_LAST" class.
+ */
+static void gk20a_ltc_set_max_ways_evict_last(struct gk20a *g, u32 max_ways)
+{
+	u32 mgmt_reg;
+
+	mgmt_reg = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_r()) &
+		~ltc_ltcs_ltss_tstg_set_mgmt_max_ways_evict_last_f(~0);
+	mgmt_reg |= ltc_ltcs_ltss_tstg_set_mgmt_max_ways_evict_last_f(max_ways);
+
+	gk20a_writel(g, ltc_ltcs_ltss_tstg_set_mgmt_r(), mgmt_reg);
+}
+
+/*
+ * Sets the ZBC color for the passed index.
+ */
+static void gk20a_ltc_set_zbc_color_entry(struct gk20a *g,
+					  struct zbc_entry *color_val,
+					  u32 index)
+{
+	u32 i;
+	u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;
+
+	gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
+		     ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));
+
+	for (i = 0;
+	     i < ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(); i++)
+		gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(i),
+			     color_val->color_l2[i]);
+}
+
+/*
+ * Sets the ZBC depth for the passed index.
+ */
+static void gk20a_ltc_set_zbc_depth_entry(struct gk20a *g,
+					  struct zbc_entry *depth_val,
+					  u32 index)
+{
+	u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;
+
+	gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
+		     ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));
+
+	gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(),
+		     depth_val->depth);
+}
+
+/*
+ * Clear the L2 ZBC color table for the passed index.
+ */
+static void gk20a_ltc_clear_zbc_color_entry(struct gk20a *g, u32 index)
+{
+	u32 i;
+	u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;
+
+	gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
+		     ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));
+
+	for (i = 0;
+	     i < ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(); i++)
+		gk20a_writel(g,
+			ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(i), 0);
+}
+
+/*
+ * Clear the L2 ZBC depth entry for the passed index.
+ */
+static void gk20a_ltc_clear_zbc_depth_entry(struct gk20a *g, u32 index)
+{
+	u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;
+
+	gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
+		     ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));
+
+	gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(), 0);
+}
+
+static int gk20a_ltc_init_zbc(struct gk20a *g, struct gr_gk20a *gr)
+{
+	u32 i, j;
+
+	/* reset zbc clear */
+	for (i = 0; i < GK20A_SIZEOF_ZBC_TABLE -
+	     GK20A_STARTOF_ZBC_TABLE; i++) {
+		gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
+			(gk20a_readl(g, ltc_ltcs_ltss_dstg_zbc_index_r()) &
+			 ~ltc_ltcs_ltss_dstg_zbc_index_address_f(~0)) |
+				ltc_ltcs_ltss_dstg_zbc_index_address_f(
+					i + GK20A_STARTOF_ZBC_TABLE));
+		for (j = 0; j < ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(); j++)
+			gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(j), 0);
+		gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(), 0);
+	}
+
+	gr_gk20a_clear_zbc_table(g, gr);
+	gr_gk20a_load_zbc_default_table(g, gr);
+
+	return 0;
+}
+
+static void gk20a_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
+{
+	u32 compbit_base_post_divide;
+	u64 compbit_base_post_multiply64;
+	u64 compbit_store_base_iova =
+		NV_MC_SMMU_VADDR_TRANSLATE(gr->compbit_store.base_iova);
+	u64 compbit_base_post_divide64 = (compbit_store_base_iova >>
+		ltc_ltcs_ltss_cbc_base_alignment_shift_v());
+
+	do_div(compbit_base_post_divide64, gr->num_fbps);
+	compbit_base_post_divide = u64_lo32(compbit_base_post_divide64);
+
+	compbit_base_post_multiply64 = ((u64)compbit_base_post_divide *
+		gr->num_fbps) << ltc_ltcs_ltss_cbc_base_alignment_shift_v();
+
+	if (compbit_base_post_multiply64 < compbit_store_base_iova)
+		compbit_base_post_divide++;
+
+	gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(),
+		compbit_base_post_divide);
+
+	gk20a_dbg(gpu_dbg_info | gpu_dbg_map | gpu_dbg_pte,
+		   "compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n",
+		   (u32)(compbit_store_base_iova >> 32),
+		   (u32)(compbit_store_base_iova & 0xffffffff),
+		   compbit_base_post_divide);
+}
+
+/* Flushes the compression bit cache as well as "data".
+ * Note: the name here is a bit of a misnomer.  ELPG uses this
+ * internally... but ELPG doesn't have to be on to do it manually.
+ */
+static void gk20a_mm_g_elpg_flush_locked(struct gk20a *g)
+{
+	u32 data;
+	s32 retry = 100;
+
+	gk20a_dbg_fn("");
+
+	/* Make sure all previous writes are committed to the L2. There's no
+	   guarantee that writes are to DRAM. This will be a sysmembar internal
+	   to the L2. */
+	gk20a_writel(g, ltc_ltss_g_elpg_r(),
+		     ltc_ltss_g_elpg_flush_pending_f());
+	do {
+		data = gk20a_readl(g, ltc_ltss_g_elpg_r());
+
+		if (ltc_ltss_g_elpg_flush_v(data) ==
+		    ltc_ltss_g_elpg_flush_pending_v()) {
+			gk20a_dbg_info("g_elpg_flush 0x%x", data);
+			retry--;
+			usleep_range(20, 40);
+		} else
+			break;
+	} while (retry >= 0 || !tegra_platform_is_silicon());
+
+	if (retry < 0)
+		gk20a_warn(dev_from_gk20a(g),
+			    "g_elpg_flush too many retries");
+
+}
--
cgit v1.2.2
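
The cache-size arithmetic in gk20a_determine_L2_size_bytes() above is a plain product of the topology figures decoded from the LTC0_LTS0_TSTG_CFG1 register: total L2 bytes = active LTCs x LTS per LTC x ways x sets x bytes per line. The standalone sketch below, which is not part of the patch, only mirrors that product; the helper name and the 16-way figure are illustrative assumptions (the driver derives the way count from hweight32() of the active-ways field), while the 1 LTC, 1 LTS/LTC, 64-set and 128-byte-line values are the GK20A numbers used in the patch.

    #include <stdio.h>
    #include <stdint.h>

    /*
     * Illustrative model (not driver code) of the product computed by
     * gk20a_determine_L2_size_bytes():
     *   L2 size = active LTCs * LTS per LTC * ways * sets * bytes per line
     */
    static uint32_t l2_size_bytes(uint32_t active_ltcs, uint32_t lts_per_ltc,
                                  uint32_t ways, uint32_t sets,
                                  uint32_t bytes_per_line)
    {
            return active_ltcs * lts_per_ltc * ways * sets * bytes_per_line;
    }

    int main(void)
    {
            /* GK20A case with all 64 sets active and an assumed 16 active
             * ways: 1 * 1 * 16 * 64 * 128 = 131072 bytes (128 KiB). */
            printf("%u bytes\n", l2_size_bytes(1, 1, 16, 64, 128));
            return 0;
    }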
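
Similarly, gk20a_ltc_init_cbc() programs the CBC base register with the compbit store address shifted down by the base-alignment amount and divided by the FBP count, rounding the quotient up so the programmed base never falls below the actual store. The sketch below is again a standalone illustration rather than driver code; the function name, the 16-bit alignment shift, and the example address are assumptions (the driver takes the real shift from ltc_ltcs_ltss_cbc_base_alignment_shift_v()).

    #include <stdio.h>
    #include <stdint.h>

    /*
     * Illustrative model (not driver code) of the round-up in
     * gk20a_ltc_init_cbc(): divide the alignment-shifted IOVA by the FBP
     * count, then bump the quotient if multiplying back lands short of
     * the original address.
     */
    static uint32_t cbc_base_value(uint64_t store_iova, uint32_t num_fbps,
                                   uint32_t alignment_shift)
    {
            uint64_t post_divide = (store_iova >> alignment_shift) / num_fbps;
            uint64_t post_multiply = (post_divide * num_fbps) << alignment_shift;

            if (post_multiply < store_iova)
                    post_divide++;  /* round up, never point below the store */

            return (uint32_t)post_divide;
    }

    int main(void)
    {
            /* Example only: 2 FBPs and an assumed 16-bit alignment shift. */
            printf("0x%x\n", cbc_base_value(0x12350000ULL, 2, 16));
            return 0;
    }

Rounding down instead would leave base * num_fbps pointing below the compbit store, so the hardware could address memory in front of it; the bump after the multiply-back check avoids that.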