From 0e8aee1c1a38abbc2dccf3f604a9843cf38071e0 Mon Sep 17 00:00:00 2001
From: Deepak Goyal
Date: Fri, 22 Sep 2017 12:08:10 +0530
Subject: gpu: nvgpu: skip clk gating prog for sim/emu.

For Simulation/Emulation platforms, clock gating should be skipped
as it is not supported. Added new flags "can_slcg", "can_blcg" and
"can_elcg" to check platform capability before programming SLCG,
BLCG and ELCG. (A standalone sketch of the resulting guard pattern
follows the patch.)

Bug 200314250

Change-Id: I4124d444a77a4c06df8c1d82c6038bfd457f3db0
Signed-off-by: Deepak Goyal
Reviewed-on: https://git-master.nvidia.com/r/1566049
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/gp10b/gp10b_gating_reglist.c | 99 +++++++++++++++++++++++++-
 1 file changed, 98 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/nvgpu/gp10b/gp10b_gating_reglist.c b/drivers/gpu/nvgpu/gp10b/gp10b_gating_reglist.c
index 473c97f3..944fa741 100644
--- a/drivers/gpu/nvgpu/gp10b/gp10b_gating_reglist.c
+++ b/drivers/gpu/nvgpu/gp10b/gp10b_gating_reglist.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -26,6 +26,7 @@
 #define __gp10b_gating_reglist_h__
 
 #include "gp10b_gating_reglist.h"
+#include <nvgpu/enabled.h>
 
 struct gating_desc {
         u32 addr;
@@ -281,6 +282,10 @@ void gp10b_slcg_bus_load_gating_prod(struct gk20a *g,
 {
         u32 i;
         u32 size = sizeof(gp10b_slcg_bus) / sizeof(struct gating_desc);
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+                return;
+
         for (i = 0; i < size; i++) {
                 if (prod)
                         gk20a_writel(g, gp10b_slcg_bus[i].addr,
@@ -296,6 +301,10 @@ void gp10b_slcg_ce2_load_gating_prod(struct gk20a *g,
 {
         u32 i;
         u32 size = sizeof(gp10b_slcg_ce2) / sizeof(struct gating_desc);
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+                return;
+
         for (i = 0; i < size; i++) {
                 if (prod)
                         gk20a_writel(g, gp10b_slcg_ce2[i].addr,
@@ -311,6 +320,10 @@ void gp10b_slcg_chiplet_load_gating_prod(struct gk20a *g,
 {
         u32 i;
         u32 size = sizeof(gp10b_slcg_chiplet) / sizeof(struct gating_desc);
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+                return;
+
         for (i = 0; i < size; i++) {
                 if (prod)
                         gk20a_writel(g, gp10b_slcg_chiplet[i].addr,
@@ -331,6 +344,10 @@ void gp10b_slcg_fb_load_gating_prod(struct gk20a *g,
 {
         u32 i;
         u32 size = sizeof(gp10b_slcg_fb) / sizeof(struct gating_desc);
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+                return;
+
         for (i = 0; i < size; i++) {
                 if (prod)
                         gk20a_writel(g, gp10b_slcg_fb[i].addr,
@@ -346,6 +363,10 @@ void gp10b_slcg_fifo_load_gating_prod(struct gk20a *g,
 {
         u32 i;
         u32 size = sizeof(gp10b_slcg_fifo) / sizeof(struct gating_desc);
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+                return;
+
         for (i = 0; i < size; i++) {
                 if (prod)
                         gk20a_writel(g, gp10b_slcg_fifo[i].addr,
@@ -361,6 +382,10 @@ void gr_gp10b_slcg_gr_load_gating_prod(struct gk20a *g,
 {
         u32 i;
         u32 size = sizeof(gp10b_slcg_gr) / sizeof(struct gating_desc);
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+                return;
+
         for (i = 0; i < size; i++) {
                 if (prod)
                         gk20a_writel(g, gp10b_slcg_gr[i].addr,
@@ -376,6 +401,10 @@ void ltc_gp10b_slcg_ltc_load_gating_prod(struct gk20a *g,
 {
         u32 i;
         u32 size = sizeof(gp10b_slcg_ltc) / sizeof(struct gating_desc);
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+                return;
+
         for (i = 0; i < size; i++) {
                 if (prod)
                         gk20a_writel(g, gp10b_slcg_ltc[i].addr,
@@ -391,6 +420,10 @@ void gp10b_slcg_perf_load_gating_prod(struct gk20a *g,
 {
         u32 i;
         u32 size = sizeof(gp10b_slcg_perf) / sizeof(struct gating_desc);
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+                return;
+
         for (i = 0; i < size; i++) {
                 if (prod)
                         gk20a_writel(g, gp10b_slcg_perf[i].addr,
@@ -406,6 +439,10 @@ void gp10b_slcg_priring_load_gating_prod(struct gk20a *g,
 {
         u32 i;
         u32 size = sizeof(gp10b_slcg_priring) / sizeof(struct gating_desc);
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+                return;
+
         for (i = 0; i < size; i++) {
                 if (prod)
                         gk20a_writel(g, gp10b_slcg_priring[i].addr,
@@ -421,6 +458,10 @@ void gp10b_slcg_pwr_csb_load_gating_prod(struct gk20a *g,
 {
         u32 i;
         u32 size = sizeof(gp10b_slcg_pwr_csb) / sizeof(struct gating_desc);
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+                return;
+
         for (i = 0; i < size; i++) {
                 if (prod)
                         gk20a_writel(g, gp10b_slcg_pwr_csb[i].addr,
@@ -436,6 +477,10 @@ void gp10b_slcg_pmu_load_gating_prod(struct gk20a *g,
 {
         u32 i;
         u32 size = sizeof(gp10b_slcg_pmu) / sizeof(struct gating_desc);
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+                return;
+
         for (i = 0; i < size; i++) {
                 if (prod)
                         gk20a_writel(g, gp10b_slcg_pmu[i].addr,
@@ -451,6 +496,10 @@ void gp10b_slcg_therm_load_gating_prod(struct gk20a *g,
 {
         u32 i;
         u32 size = sizeof(gp10b_slcg_therm) / sizeof(struct gating_desc);
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+                return;
+
         for (i = 0; i < size; i++) {
                 if (prod)
                         gk20a_writel(g, gp10b_slcg_therm[i].addr,
@@ -466,6 +515,10 @@ void gp10b_slcg_xbar_load_gating_prod(struct gk20a *g,
 {
         u32 i;
         u32 size = sizeof(gp10b_slcg_xbar) / sizeof(struct gating_desc);
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+                return;
+
         for (i = 0; i < size; i++) {
                 if (prod)
                         gk20a_writel(g, gp10b_slcg_xbar[i].addr,
@@ -481,6 +534,10 @@ void gp10b_blcg_bus_load_gating_prod(struct gk20a *g,
 {
         u32 i;
         u32 size = sizeof(gp10b_blcg_bus) / sizeof(struct gating_desc);
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+                return;
+
         for (i = 0; i < size; i++) {
                 if (prod)
                         gk20a_writel(g, gp10b_blcg_bus[i].addr,
@@ -496,6 +553,10 @@ void gp10b_blcg_ce_load_gating_prod(struct gk20a *g,
 {
         u32 i;
         u32 size = sizeof(gp10b_blcg_ce) / sizeof(struct gating_desc);
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+                return;
+
         for (i = 0; i < size; i++) {
                 if (prod)
                         gk20a_writel(g, gp10b_blcg_ce[i].addr,
@@ -511,6 +572,10 @@ void gp10b_blcg_ctxsw_firmware_load_gating_prod(struct gk20a *g,
 {
         u32 i;
         u32 size = sizeof(gp10b_blcg_ctxsw_prog) / sizeof(struct gating_desc);
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+                return;
+
         for (i = 0; i < size; i++) {
                 if (prod)
                         gk20a_writel(g, gp10b_blcg_ctxsw_prog[i].addr,
@@ -526,6 +591,10 @@ void gp10b_blcg_fb_load_gating_prod(struct gk20a *g,
 {
         u32 i;
         u32 size = sizeof(gp10b_blcg_fb) / sizeof(struct gating_desc);
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+                return;
+
         for (i = 0; i < size; i++) {
                 if (prod)
                         gk20a_writel(g, gp10b_blcg_fb[i].addr,
@@ -541,6 +610,10 @@ void gp10b_blcg_fifo_load_gating_prod(struct gk20a *g,
 {
         u32 i;
         u32 size = sizeof(gp10b_blcg_fifo) / sizeof(struct gating_desc);
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+                return;
+
         for (i = 0; i < size; i++) {
                 if (prod)
                         gk20a_writel(g, gp10b_blcg_fifo[i].addr,
@@ -556,6 +629,10 @@ void gp10b_blcg_gr_load_gating_prod(struct gk20a *g,
 {
         u32 i;
         u32 size = sizeof(gp10b_blcg_gr) / sizeof(struct gating_desc);
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+                return;
+
         for (i = 0; i < size; i++) {
                 if (prod)
                         gk20a_writel(g, gp10b_blcg_gr[i].addr,
@@ -571,6 +648,10 @@ void gp10b_blcg_ltc_load_gating_prod(struct gk20a *g,
 {
         u32 i;
         u32 size = sizeof(gp10b_blcg_ltc) / sizeof(struct gating_desc);
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+                return;
+
         for (i = 0; i < size; i++) {
                 if (prod)
                         gk20a_writel(g, gp10b_blcg_ltc[i].addr,
@@ -586,6 +667,10 @@ void gp10b_blcg_pwr_csb_load_gating_prod(struct gk20a *g,
 {
         u32 i;
         u32 size = sizeof(gp10b_blcg_pwr_csb) / sizeof(struct gating_desc);
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+                return;
+
         for (i = 0; i < size; i++) {
                 if (prod)
                         gk20a_writel(g, gp10b_blcg_pwr_csb[i].addr,
@@ -601,6 +686,10 @@ void gp10b_blcg_pmu_load_gating_prod(struct gk20a *g,
 {
         u32 i;
         u32 size = sizeof(gp10b_blcg_pmu) / sizeof(struct gating_desc);
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+                return;
+
         for (i = 0; i < size; i++) {
                 if (prod)
                         gk20a_writel(g, gp10b_blcg_pmu[i].addr,
@@ -616,6 +705,10 @@ void gp10b_blcg_xbar_load_gating_prod(struct gk20a *g,
 {
         u32 i;
         u32 size = sizeof(gp10b_blcg_xbar) / sizeof(struct gating_desc);
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+                return;
+
         for (i = 0; i < size; i++) {
                 if (prod)
                         gk20a_writel(g, gp10b_blcg_xbar[i].addr,
@@ -631,6 +724,10 @@ void gr_gp10b_pg_gr_load_gating_prod(struct gk20a *g,
 {
         u32 i;
         u32 size = sizeof(gp10b_pg_gr) / sizeof(struct gating_desc);
+
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+                return;
+
         for (i = 0; i < size; i++) {
                 if (prod)
                         gk20a_writel(g, gp10b_pg_gr[i].addr,
-- 
cgit v1.2.2
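
The patch makes the same mechanical change in every *_load_gating_prod()
function: consult a capability flag via nvgpu_is_enabled() and return early
before walking the register list, so no gating registers are touched on
platforms that cannot support clock gating. Below is a minimal, self-contained
C sketch of that guard pattern. Everything prefixed mock_/CAP_, plus the
register address and values, is invented for illustration; only the shape of
gating_desc, the sizeof-based list walk, and the early-return check mirror the
patch itself.

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned int u32;

    /* Same shape as the driver's gating_desc. */
    struct gating_desc {
            u32 addr;
            u32 prod;
            u32 disable;
    };

    /* Stand-ins for the NVGPU_GPU_CAN_* enabled flags. */
    enum cap_flag { CAP_CAN_SLCG, CAP_CAN_BLCG, CAP_CAN_ELCG, CAP_COUNT };

    struct mock_gpu {
            bool caps[CAP_COUNT]; /* all false on simulation/emulation */
    };

    /* Plays the role of nvgpu_is_enabled(). */
    static bool mock_is_enabled(const struct mock_gpu *g, enum cap_flag f)
    {
            return g->caps[f];
    }

    /* Plays the role of gk20a_writel(); logs instead of touching MMIO. */
    static void mock_writel(struct mock_gpu *g, u32 addr, u32 val)
    {
            (void)g;
            printf("writel 0x%08x -> [0x%08x]\n", val, addr);
    }

    /* Illustrative register list; address/values are made up. */
    static const struct gating_desc slcg_bus[] = {
            { .addr = 0x1c04, .prod = 0x00000002, .disable = 0xfffffffe },
    };

    /* The pattern the patch adds to every *_load_gating_prod() function. */
    static void slcg_bus_load_gating_prod(struct mock_gpu *g, bool prod)
    {
            u32 i;
            u32 size = sizeof(slcg_bus) / sizeof(struct gating_desc);

            /* New guard: skip programming when SLCG is unsupported. */
            if (!mock_is_enabled(g, CAP_CAN_SLCG))
                    return;

            for (i = 0; i < size; i++)
                    mock_writel(g, slcg_bus[i].addr,
                                prod ? slcg_bus[i].prod : slcg_bus[i].disable);
    }

    int main(void)
    {
            struct mock_gpu simulator = { .caps = { false, false, false } };
            struct mock_gpu silicon   = { .caps = { true,  true,  true  } };

            slcg_bus_load_gating_prod(&simulator, true); /* no writes */
            slcg_bus_load_gating_prod(&silicon, true);   /* programs prod */
            return 0;
    }

Per the commit message, the per-chip "can_slcg"/"can_blcg"/"can_elcg" platform
flags presumably seed the NVGPU_GPU_CAN_* enabled flags at probe time; the
early return then keeps the reglists themselves untouched, which is simpler
and safer than pruning entries per platform.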