summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMahantesh Kumbar <mkumbar@nvidia.com>2018-02-13 04:07:18 -0500
committermobile promotions <svcmobile_promotions@nvidia.com>2018-03-08 02:27:49 -0500
commitcc4b9f540f66abc9f60cf9f8e2217ff17349bc77 (patch)
tree695dca926578d8b02fab2cbf9fb98d3d4733c39f
parent418f31cd91a5c3ca45f0920ed64205def49c8a80 (diff)
gpu: nvgpu: PMU super surface support
- Added ops "pmu.alloc_super_surface" to create memory space for pmu super surface - Defined method nvgpu_pmu_super_surface_alloc() to allocate pmu super surface memory & assigned to "pmu.alloc_super_surface" for gv100 - "pmu.alloc_super_surface" set to NULL for gp106 - Memory space of size "struct nv_pmu_super_surface" is allocated during pmu sw init setup if "pmu.alloc_super_surface" is not NULL & freed if an error occurs. - Added ops "pmu_ver.config_pmu_cmdline_args_super_surface" to describe PMU super surface details to PMU ucode as part of pmu command line args command if "pmu.alloc_super_surface" is not NULL. - Updated pmu_cmdline_args_v6 to include member "struct flcn_mem_desc_v0 super_surface" - Free allocated memory for PMU super surface in nvgpu_remove_pmu_support() method - Added "struct nvgpu_mem super_surface_buf" to "nvgpu_pmu" struct - Created header file "gpmu_super_surf_if.h" to include interface about pmu super surface, added "struct nv_pmu_super_surface" to hold super surface members along with rsvd[x] dummy space to sync member offsets with PMU super surface members. Change-Id: I2b28912bf4d86a8cc72884e3b023f21c73fb3503 Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1656571 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
-rw-r--r--drivers/gpu/nvgpu/common/pmu/pmu.c30
-rw-r--r--drivers/gpu/nvgpu/common/pmu/pmu_fw.c14
-rw-r--r--drivers/gpu/nvgpu/gk20a/gk20a.h4
-rw-r--r--drivers/gpu/nvgpu/gp106/hal_gp106.c1
-rw-r--r--drivers/gpu/nvgpu/gp106/sec2_gp106.c2
-rw-r--r--drivers/gpu/nvgpu/gv100/hal_gv100.c1
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/pmu.h4
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmu_super_surf_if.h77
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_pmu.h4
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/pmuif/nvgpu_gpmu_cmdif.h1
10 files changed, 135 insertions, 3 deletions
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c
index e96ea090..aaae138c 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu.c
@@ -241,11 +241,19 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g)
241 241
242 pmu->seq_buf.size = GK20A_PMU_SEQ_BUF_SIZE; 242 pmu->seq_buf.size = GK20A_PMU_SEQ_BUF_SIZE;
243 243
244 if (g->ops.pmu.alloc_super_surface) {
245 err = g->ops.pmu.alloc_super_surface(g,
246 &pmu->super_surface_buf,
247 sizeof(struct nv_pmu_super_surface));
248 if (err)
249 goto err_free_seq_buf;
250 }
251
244 err = nvgpu_dma_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE, 252 err = nvgpu_dma_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE,
245 &pmu->trace_buf); 253 &pmu->trace_buf);
246 if (err) { 254 if (err) {
247 nvgpu_err(g, "failed to allocate pmu trace buffer\n"); 255 nvgpu_err(g, "failed to allocate pmu trace buffer\n");
248 goto err_free_seq_buf; 256 goto err_free_super_surface;
249 } 257 }
250 258
251 pmu->sw_ready = true; 259 pmu->sw_ready = true;
@@ -253,6 +261,9 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g)
253skip_init: 261skip_init:
254 nvgpu_log_fn(g, "done"); 262 nvgpu_log_fn(g, "done");
255 return 0; 263 return 0;
264 err_free_super_surface:
265 if (g->ops.pmu.alloc_super_surface)
266 nvgpu_dma_unmap_free(vm, &pmu->super_surface_buf);
256 err_free_seq_buf: 267 err_free_seq_buf:
257 nvgpu_dma_unmap_free(vm, &pmu->seq_buf); 268 nvgpu_dma_unmap_free(vm, &pmu->seq_buf);
258 err_free_seq: 269 err_free_seq:
@@ -560,6 +571,23 @@ int nvgpu_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
560 return 0; 571 return 0;
561} 572}
562 573
574int nvgpu_pmu_super_surface_alloc(struct gk20a *g,
575 struct nvgpu_mem *mem_surface, u32 size)
576{
577 struct vm_gk20a *vm = g->mm.pmu.vm;
578 int err = 0;
579
580 nvgpu_log_fn(g, " ");
581
582 err = nvgpu_dma_alloc_map(vm, size, mem_surface);
583 if (err) {
584 nvgpu_err(g, "failed to allocate pmu suffer surface\n");
585 err = -ENOMEM;
586 }
587
588 return err;
589}
590
563void nvgpu_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem) 591void nvgpu_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem)
564{ 592{
565 nvgpu_dma_free(g, mem); 593 nvgpu_dma_free(g, mem);
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c
index 549cfdc6..6b565abb 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c
@@ -142,6 +142,16 @@ static void set_pmu_cmdline_args_falctracedmabase_v5(struct nvgpu_pmu *pmu)
142 nvgpu_pmu_surface_describe(g, &pmu->trace_buf, &pmu->args_v5.trace_buf); 142 nvgpu_pmu_surface_describe(g, &pmu->trace_buf, &pmu->args_v5.trace_buf);
143} 143}
144 144
145static void config_pmu_cmdline_args_super_surface_v6(struct nvgpu_pmu *pmu)
146{
147 struct gk20a *g = gk20a_from_pmu(pmu);
148
149 if (g->ops.pmu.alloc_super_surface) {
150 nvgpu_pmu_surface_describe(g, &pmu->super_surface_buf,
151 &pmu->args_v6.super_surface);
152 }
153}
154
145static void set_pmu_cmdline_args_falctracedmaidx_v5( 155static void set_pmu_cmdline_args_falctracedmaidx_v5(
146 struct nvgpu_pmu *pmu, u32 idx) 156 struct nvgpu_pmu *pmu, u32 idx)
147{ 157{
@@ -1250,6 +1260,8 @@ static int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu)
1250 set_pmu_cmdline_args_falctracedmabase_v5; 1260 set_pmu_cmdline_args_falctracedmabase_v5;
1251 g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx = 1261 g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx =
1252 set_pmu_cmdline_args_falctracedmaidx_v5; 1262 set_pmu_cmdline_args_falctracedmaidx_v5;
1263 g->ops.pmu_ver.config_pmu_cmdline_args_super_surface =
1264 config_pmu_cmdline_args_super_surface_v6;
1253 g->ops.pmu_ver.get_pmu_cmdline_args_ptr = 1265 g->ops.pmu_ver.get_pmu_cmdline_args_ptr =
1254 get_pmu_cmdline_args_ptr_v5; 1266 get_pmu_cmdline_args_ptr_v5;
1255 g->ops.pmu_ver.get_pmu_allocation_struct_size = 1267 g->ops.pmu_ver.get_pmu_allocation_struct_size =
@@ -1587,6 +1599,8 @@ static void nvgpu_remove_pmu_support(struct nvgpu_pmu *pmu)
1587 1599
1588 nvgpu_dma_unmap_free(vm, &pmu->seq_buf); 1600 nvgpu_dma_unmap_free(vm, &pmu->seq_buf);
1589 1601
1602 nvgpu_dma_unmap_free(vm, &pmu->super_surface_buf);
1603
1590 nvgpu_mutex_destroy(&pmu->elpg_mutex); 1604 nvgpu_mutex_destroy(&pmu->elpg_mutex);
1591 nvgpu_mutex_destroy(&pmu->pg_mutex); 1605 nvgpu_mutex_destroy(&pmu->pg_mutex);
1592 nvgpu_mutex_destroy(&pmu->isr_mutex); 1606 nvgpu_mutex_destroy(&pmu->isr_mutex);
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 6b93cb8d..0e164a72 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -641,6 +641,8 @@ struct gpu_ops {
641 u32 size); 641 u32 size);
642 void (*set_pmu_cmdline_args_trace_dma_base)( 642 void (*set_pmu_cmdline_args_trace_dma_base)(
643 struct nvgpu_pmu *pmu); 643 struct nvgpu_pmu *pmu);
644 void (*config_pmu_cmdline_args_super_surface)(
645 struct nvgpu_pmu *pmu);
644 void (*set_pmu_cmdline_args_trace_dma_idx)( 646 void (*set_pmu_cmdline_args_trace_dma_idx)(
645 struct nvgpu_pmu *pmu, u32 idx); 647 struct nvgpu_pmu *pmu, u32 idx);
646 void * (*get_pmu_cmdline_args_ptr)(struct nvgpu_pmu *pmu); 648 void * (*get_pmu_cmdline_args_ptr)(struct nvgpu_pmu *pmu);
@@ -914,6 +916,8 @@ struct gpu_ops {
914 void (*update_lspmu_cmdline_args)(struct gk20a *g); 916 void (*update_lspmu_cmdline_args)(struct gk20a *g);
915 void (*setup_apertures)(struct gk20a *g); 917 void (*setup_apertures)(struct gk20a *g);
916 u32 (*get_irqdest)(struct gk20a *g); 918 u32 (*get_irqdest)(struct gk20a *g);
919 int (*alloc_super_surface)(struct gk20a *g,
920 struct nvgpu_mem *super_surface, u32 size);
917 } pmu; 921 } pmu;
918 struct { 922 struct {
919 int (*init_debugfs)(struct gk20a *g); 923 int (*init_debugfs)(struct gk20a *g);
diff --git a/drivers/gpu/nvgpu/gp106/hal_gp106.c b/drivers/gpu/nvgpu/gp106/hal_gp106.c
index e3d5556f..b0eab947 100644
--- a/drivers/gpu/nvgpu/gp106/hal_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/hal_gp106.c
@@ -611,6 +611,7 @@ static const struct gpu_ops gp106_ops = {
611 .pmu_get_queue_tail = pwr_pmu_queue_tail_r, 611 .pmu_get_queue_tail = pwr_pmu_queue_tail_r,
612 .pmu_lpwr_enable_pg = nvgpu_lpwr_enable_pg, 612 .pmu_lpwr_enable_pg = nvgpu_lpwr_enable_pg,
613 .get_irqdest = gk20a_pmu_get_irqdest, 613 .get_irqdest = gk20a_pmu_get_irqdest,
614 .alloc_super_surface = NULL,
614 }, 615 },
615 .clk = { 616 .clk = {
616 .init_clk_support = gp106_init_clk_support, 617 .init_clk_support = gp106_init_clk_support,
diff --git a/drivers/gpu/nvgpu/gp106/sec2_gp106.c b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
index 8e4e5900..08c7f84a 100644
--- a/drivers/gpu/nvgpu/gp106/sec2_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
@@ -178,6 +178,8 @@ void init_pmu_setup_hw1(struct gk20a *g)
178 g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu); 178 g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu);
179 g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx( 179 g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx(
180 pmu, GK20A_PMU_DMAIDX_VIRT); 180 pmu, GK20A_PMU_DMAIDX_VIRT);
181 if (g->ops.pmu_ver.config_pmu_cmdline_args_super_surface)
182 g->ops.pmu_ver.config_pmu_cmdline_args_super_surface(pmu);
181 183
182 nvgpu_flcn_copy_to_dmem(pmu->flcn, g->acr.pmu_args, 184 nvgpu_flcn_copy_to_dmem(pmu->flcn, g->acr.pmu_args,
183 (u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)), 185 (u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
diff --git a/drivers/gpu/nvgpu/gv100/hal_gv100.c b/drivers/gpu/nvgpu/gv100/hal_gv100.c
index 00f5bcce..802a43af 100644
--- a/drivers/gpu/nvgpu/gv100/hal_gv100.c
+++ b/drivers/gpu/nvgpu/gv100/hal_gv100.c
@@ -620,6 +620,7 @@ static const struct gpu_ops gv100_ops = {
620 .is_engine_in_reset = gp106_pmu_is_engine_in_reset, 620 .is_engine_in_reset = gp106_pmu_is_engine_in_reset,
621 .pmu_get_queue_tail = pwr_pmu_queue_tail_r, 621 .pmu_get_queue_tail = pwr_pmu_queue_tail_r,
622 .get_irqdest = gk20a_pmu_get_irqdest, 622 .get_irqdest = gk20a_pmu_get_irqdest,
623 .alloc_super_surface = nvgpu_pmu_super_surface_alloc,
623 }, 624 },
624 .clk = { 625 .clk = {
625 .init_clk_support = gp106_init_clk_support, 626 .init_clk_support = gp106_init_clk_support,
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu.h b/drivers/gpu/nvgpu/include/nvgpu/pmu.h
index 5e34abe6..76a54fa1 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/pmu.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/pmu.h
@@ -306,6 +306,8 @@ struct nvgpu_pmu {
306 /* TBD: remove this if ZBC seq is fixed */ 306 /* TBD: remove this if ZBC seq is fixed */
307 struct nvgpu_mem seq_buf; 307 struct nvgpu_mem seq_buf;
308 struct nvgpu_mem trace_buf; 308 struct nvgpu_mem trace_buf;
309 struct nvgpu_mem super_surface_buf;
310
309 bool buf_loaded; 311 bool buf_loaded;
310 312
311 struct pmu_sha1_gid gid_info; 313 struct pmu_sha1_gid gid_info;
@@ -449,6 +451,8 @@ int nvgpu_init_pmu_support(struct gk20a *g);
449int nvgpu_pmu_destroy(struct gk20a *g); 451int nvgpu_pmu_destroy(struct gk20a *g);
450int nvgpu_pmu_process_init_msg(struct nvgpu_pmu *pmu, 452int nvgpu_pmu_process_init_msg(struct nvgpu_pmu *pmu,
451 struct pmu_msg *msg); 453 struct pmu_msg *msg);
454int nvgpu_pmu_super_surface_alloc(struct gk20a *g,
455 struct nvgpu_mem *mem_surface, u32 size);
452 456
453void nvgpu_pmu_state_change(struct gk20a *g, u32 pmu_state, 457void nvgpu_pmu_state_change(struct gk20a *g, u32 pmu_state,
454 bool post_change_event); 458 bool post_change_event);
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmu_super_surf_if.h b/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmu_super_surf_if.h
new file mode 100644
index 00000000..cf39658f
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmu_super_surf_if.h
@@ -0,0 +1,77 @@
1/*
2 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22#ifndef __GPMU_SUPER_SURF_IF_H__
23#define __GPMU_SUPER_SURF_IF_H__
24
25struct nv_pmu_super_surface_hdr {
26 u32 memberMask;
27 u16 dmemBufferSizeMax;
28};
29
30NV_PMU_MAKE_ALIGNED_STRUCT(nv_pmu_super_surface_hdr,
31 sizeof(struct nv_pmu_super_surface_hdr));
32
33/*
34 * Global Super Surface structure for combined INIT data required by PMU.
35 * NOTE: Any new substructures or entries must be aligned.
36 */
37struct nv_pmu_super_surface {
38 union nv_pmu_super_surface_hdr_aligned hdr;
39
40 struct {
41 struct nv_pmu_volt_volt_device_boardobj_grp_set volt_device_grp_set;
42 struct nv_pmu_volt_volt_policy_boardobj_grp_set volt_policy_grp_set;
43 struct nv_pmu_volt_volt_rail_boardobj_grp_set volt_rail_grp_set;
44
45 struct nv_pmu_volt_volt_policy_boardobj_grp_get_status volt_policy_grp_get_status;
46 struct nv_pmu_volt_volt_rail_boardobj_grp_get_status volt_rail_grp_get_status;
47 struct nv_pmu_volt_volt_device_boardobj_grp_get_status volt_device_grp_get_status;
48 } volt;
49 struct {
50 struct nv_pmu_clk_clk_vin_device_boardobj_grp_set clk_vin_device_grp_set;
51 struct nv_pmu_clk_clk_domain_boardobj_grp_set clk_domain_grp_set;
52 struct nv_pmu_clk_clk_freq_controller_boardobj_grp_set clk_freq_controller_grp_set;
53 struct nv_pmu_clk_clk_fll_device_boardobj_grp_set clk_fll_device_grp_set;
54 struct nv_pmu_clk_clk_prog_boardobj_grp_set clk_prog_grp_set;
55 struct nv_pmu_clk_clk_vf_point_boardobj_grp_set clk_vf_point_grp_set;
56
57 struct nv_pmu_clk_clk_vin_device_boardobj_grp_get_status clk_vin_device_grp_get_status;
58 struct nv_pmu_clk_clk_fll_device_boardobj_grp_get_status clk_fll_device_grp_get_status;
59 struct nv_pmu_clk_clk_vf_point_boardobj_grp_get_status clk_vf_point_grp_get_status;
60 u8 clk_rsvd[0x4660];
61 } clk;
62 struct {
63 struct nv_pmu_perf_vfe_equ_boardobj_grp_set vfe_equ_grp_set;
64 struct nv_pmu_perf_vfe_var_boardobj_grp_set vfe_var_grp_set;
65
66 struct nv_pmu_perf_vfe_var_boardobj_grp_get_status vfe_var_grp_get_status;
67 u8 perf_rsvd[0x40790];
68 u8 perfcf_rsvd[0x1eb0];
69 } perf;
70 struct {
71 struct nv_pmu_therm_therm_channel_boardobj_grp_set therm_channel_grp_set;
72 struct nv_pmu_therm_therm_device_boardobj_grp_set therm_device_grp_set;
73 u8 therm_rsvd[0x1460];
74 } therm;
75};
76
77#endif /* __GPMU_SUPER_SURF_IF_H__ */
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_pmu.h b/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_pmu.h
index 5f718dc6..2dd511de 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_pmu.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmuif_pmu.h
@@ -66,8 +66,8 @@ struct pmu_cmdline_args_v6 {
66 u8 raise_priv_sec; 66 u8 raise_priv_sec;
67 struct flcn_mem_desc_v0 gc6_ctx; 67 struct flcn_mem_desc_v0 gc6_ctx;
68 struct flcn_mem_desc_v0 gc6_bsod_ctx; 68 struct flcn_mem_desc_v0 gc6_bsod_ctx;
69 struct flcn_mem_desc_v0 init_data_dma_info; 69 struct flcn_mem_desc_v0 super_surface;
70 u32 dummy; 70 u32 flags;
71}; 71};
72 72
73/* GPU ID */ 73/* GPU ID */
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmuif/nvgpu_gpmu_cmdif.h b/drivers/gpu/nvgpu/include/nvgpu/pmuif/nvgpu_gpmu_cmdif.h
index 208644d7..1a05ec29 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/pmuif/nvgpu_gpmu_cmdif.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/pmuif/nvgpu_gpmu_cmdif.h
@@ -38,6 +38,7 @@
38#include "gpmuiftherm.h" 38#include "gpmuiftherm.h"
39#include "gpmuifthermsensor.h" 39#include "gpmuifthermsensor.h"
40#include "gpmuifseq.h" 40#include "gpmuifseq.h"
41#include "gpmu_super_surf_if.h"
41 42
42/* 43/*
43 * Command requesting execution of the RPC (Remote Procedure Call) 44 * Command requesting execution of the RPC (Remote Procedure Call)