From 673dd971600b26131c0afdb221e13c080da022fd Mon Sep 17 00:00:00 2001 From: Mahantesh Kumbar Date: Wed, 10 May 2017 20:35:24 +0530 Subject: gpu: nvgpu: moved & renamed "struct pmu_gk20a" - Renamed "struct pmu_gk20a" to "struct nvgpu_pmu" then moved to file "pmu.h" under folder "drivers/gpu/nvgpu/include/nvgpu/" - Included header file "pmu.h" to dependent file & removed "pmu_gk20a.h" include if its usage is not present. - Replaced "struct pmu_gk20a" with "struct nvgpu_pmu" in dependent source & header files. JIRA NVGPU-56 Change-Id: Ia3c606616831027093d5c216959c6a40d7c2632e Signed-off-by: Mahantesh Kumbar Reviewed-on: http://git-master/r/1479209 Reviewed-by: mobile promotions Tested-by: mobile promotions --- drivers/gpu/nvgpu/boardobj/boardobj.h | 6 +- drivers/gpu/nvgpu/boardobj/boardobjgrp.c | 6 +- drivers/gpu/nvgpu/boardobj/boardobjgrp_e255.h | 1 - drivers/gpu/nvgpu/boardobj/boardobjgrp_e32.h | 1 - drivers/gpu/nvgpu/clk/clk.c | 5 +- drivers/gpu/nvgpu/clk/clk_domain.c | 1 - drivers/gpu/nvgpu/clk/clk_fll.c | 1 - drivers/gpu/nvgpu/clk/clk_freq_controller.c | 1 - drivers/gpu/nvgpu/clk/clk_mclk.c | 1 - drivers/gpu/nvgpu/clk/clk_prog.c | 2 - drivers/gpu/nvgpu/clk/clk_vf_point.c | 2 +- drivers/gpu/nvgpu/clk/clk_vin.c | 4 +- drivers/gpu/nvgpu/common/linux/debug_pmu.c | 2 +- drivers/gpu/nvgpu/gk20a/gk20a.h | 53 +++-- drivers/gpu/nvgpu/gk20a/gk20a_scale.c | 4 +- drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c | 2 +- drivers/gpu/nvgpu/gk20a/gr_gk20a.c | 2 +- drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | 318 ++++++++++++------------- drivers/gpu/nvgpu/gk20a/pmu_gk20a.h | 324 ++----------------------- drivers/gpu/nvgpu/gm20b/acr_gm20b.c | 19 +- drivers/gpu/nvgpu/gm20b/pmu_gm20b.c | 9 +- drivers/gpu/nvgpu/gp106/acr_gp106.c | 13 +- drivers/gpu/nvgpu/gp106/pmu_gp106.c | 14 +- drivers/gpu/nvgpu/gp106/sec2_gp106.c | 12 +- drivers/gpu/nvgpu/gp106/sec2_gp106.h | 10 +- drivers/gpu/nvgpu/gp10b/pmu_gp10b.c | 13 +- drivers/gpu/nvgpu/include/nvgpu/pmu.h | 328 ++++++++++++++++++++++++++ drivers/gpu/nvgpu/lpwr/lpwr.c | 13 +- drivers/gpu/nvgpu/lpwr/rppg.c | 8 +- drivers/gpu/nvgpu/perf/perf.c | 6 +- drivers/gpu/nvgpu/perf/vfe_equ.c | 1 - drivers/gpu/nvgpu/perf/vfe_var.c | 1 - drivers/gpu/nvgpu/pmgr/pmgrpmu.c | 2 +- drivers/gpu/nvgpu/pmgr/pwrdev.c | 1 - drivers/gpu/nvgpu/pmgr/pwrmonitor.c | 1 - drivers/gpu/nvgpu/pmgr/pwrpolicy.c | 1 - drivers/gpu/nvgpu/therm/thrmchannel.c | 3 +- drivers/gpu/nvgpu/therm/thrmdev.c | 3 +- drivers/gpu/nvgpu/volt/volt_dev.c | 1 - drivers/gpu/nvgpu/volt/volt_pmu.c | 5 +- drivers/gpu/nvgpu/volt/volt_policy.c | 1 - drivers/gpu/nvgpu/volt/volt_rail.c | 1 - 42 files changed, 614 insertions(+), 588 deletions(-) create mode 100644 drivers/gpu/nvgpu/include/nvgpu/pmu.h (limited to 'drivers/gpu') diff --git a/drivers/gpu/nvgpu/boardobj/boardobj.h b/drivers/gpu/nvgpu/boardobj/boardobj.h index a445a2c6..f2ae52e6 100644 --- a/drivers/gpu/nvgpu/boardobj/boardobj.h +++ b/drivers/gpu/nvgpu/boardobj/boardobj.h @@ -14,11 +14,11 @@ #ifndef _BOARDOBJ_H_ #define _BOARDOBJ_H_ -struct boardobj; +#include -#include "gk20a/pmu_gk20a.h" #include "ctrl/ctrlboardobj.h" -#include + +struct boardobj; /* * check whether the specified BOARDOBJ object implements the queried diff --git a/drivers/gpu/nvgpu/boardobj/boardobjgrp.c b/drivers/gpu/nvgpu/boardobj/boardobjgrp.c index d2fb8a45..51c23589 100644 --- a/drivers/gpu/nvgpu/boardobj/boardobjgrp.c +++ b/drivers/gpu/nvgpu/boardobj/boardobjgrp.c @@ -1,5 +1,5 @@ /* -* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. +* Copyright (c) 2016-2017, NVIDIA CORPORATION. 
All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -10,15 +10,13 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ +#include #include "gk20a/gk20a.h" -#include "gk20a/pmu_gk20a.h" #include "boardobjgrp.h" #include "ctrl/ctrlboardobj.h" #include "boardobj.h" -#include - static boardobjgrp_objinsert boardobjgrp_objinsert_final; static boardobjgrp_objgetbyidx boardobjgrp_objgetbyidx_final; static boardobjgrp_objgetnext boardobjgrp_objgetnext_final; diff --git a/drivers/gpu/nvgpu/boardobj/boardobjgrp_e255.h b/drivers/gpu/nvgpu/boardobj/boardobjgrp_e255.h index 059b224d..93e9d4b0 100644 --- a/drivers/gpu/nvgpu/boardobj/boardobjgrp_e255.h +++ b/drivers/gpu/nvgpu/boardobj/boardobjgrp_e255.h @@ -14,7 +14,6 @@ #ifndef _BOARDOBJGRP_E255_H_ #define _BOARDOBJGRP_E255_H_ -#include "gk20a/pmu_gk20a.h" #include "ctrl/ctrlboardobj.h" #include "boardobj.h" #include "boardobjgrpmask.h" diff --git a/drivers/gpu/nvgpu/boardobj/boardobjgrp_e32.h b/drivers/gpu/nvgpu/boardobj/boardobjgrp_e32.h index fb114024..62498f79 100644 --- a/drivers/gpu/nvgpu/boardobj/boardobjgrp_e32.h +++ b/drivers/gpu/nvgpu/boardobj/boardobjgrp_e32.h @@ -14,7 +14,6 @@ #ifndef _BOARDOBJGRP_E32_H_ #define _BOARDOBJGRP_E32_H_ -#include "gk20a/pmu_gk20a.h" #include "ctrl/ctrlboardobj.h" #include "boardobj.h" #include "boardobjgrp.h" diff --git a/drivers/gpu/nvgpu/clk/clk.c b/drivers/gpu/nvgpu/clk/clk.c index c1b8d5e1..72b6d246 100644 --- a/drivers/gpu/nvgpu/clk/clk.c +++ b/drivers/gpu/nvgpu/clk/clk.c @@ -11,13 +11,14 @@ * more details. */ +#include +#include + #include "gk20a/gk20a.h" #include "clk.h" -#include #include "ctrl/ctrlclk.h" #include "ctrl/ctrlvolt.h" #include "volt/volt.h" -#include "gk20a/pmu_gk20a.h" #define BOOT_GPC2CLK_MHZ 2581 #define BOOT_MCLK_MHZ 3003 diff --git a/drivers/gpu/nvgpu/clk/clk_domain.c b/drivers/gpu/nvgpu/clk/clk_domain.c index 84ce7371..5152772c 100644 --- a/drivers/gpu/nvgpu/clk/clk_domain.c +++ b/drivers/gpu/nvgpu/clk/clk_domain.c @@ -21,7 +21,6 @@ #include "boardobj/boardobjgrp_e32.h" #include "ctrl/ctrlclk.h" #include "ctrl/ctrlvolt.h" -#include "gk20a/pmu_gk20a.h" static struct clk_domain *construct_clk_domain(struct gk20a *g, void *pargs); diff --git a/drivers/gpu/nvgpu/clk/clk_fll.c b/drivers/gpu/nvgpu/clk/clk_fll.c index e097e593..7d1264ab 100644 --- a/drivers/gpu/nvgpu/clk/clk_fll.c +++ b/drivers/gpu/nvgpu/clk/clk_fll.c @@ -20,7 +20,6 @@ #include "boardobj/boardobjgrp_e32.h" #include "ctrl/ctrlclk.h" #include "ctrl/ctrlvolt.h" -#include "gk20a/pmu_gk20a.h" static u32 devinit_get_fll_device_table(struct gk20a *g, struct avfsfllobjs *pfllobjs); diff --git a/drivers/gpu/nvgpu/clk/clk_freq_controller.c b/drivers/gpu/nvgpu/clk/clk_freq_controller.c index 632d7b35..9bc1390a 100644 --- a/drivers/gpu/nvgpu/clk/clk_freq_controller.c +++ b/drivers/gpu/nvgpu/clk/clk_freq_controller.c @@ -22,7 +22,6 @@ #include "boardobj/boardobjgrp_e32.h" #include "ctrl/ctrlclk.h" #include "ctrl/ctrlvolt.h" -#include "gk20a/pmu_gk20a.h" static u32 clk_freq_controller_pmudatainit_super(struct gk20a *g, struct boardobj *board_obj_ptr, diff --git a/drivers/gpu/nvgpu/clk/clk_mclk.c b/drivers/gpu/nvgpu/clk/clk_mclk.c index 3996c0e1..068e7234 100644 --- a/drivers/gpu/nvgpu/clk/clk_mclk.c +++ b/drivers/gpu/nvgpu/clk/clk_mclk.c @@ -17,7 +17,6 @@ #include #include "gk20a/gk20a.h" -#include "gk20a/pmu_gk20a.h" #ifdef CONFIG_DEBUG_FS #include "gk20a/platform_gk20a.h" #endif 
diff --git a/drivers/gpu/nvgpu/clk/clk_prog.c b/drivers/gpu/nvgpu/clk/clk_prog.c index 5697c255..047eb04a 100644 --- a/drivers/gpu/nvgpu/clk/clk_prog.c +++ b/drivers/gpu/nvgpu/clk/clk_prog.c @@ -12,7 +12,6 @@ */ #include - #include #include "gk20a/gk20a.h" @@ -24,7 +23,6 @@ #include "gm206/bios_gm206.h" #include "ctrl/ctrlclk.h" #include "ctrl/ctrlvolt.h" -#include "gk20a/pmu_gk20a.h" static struct clk_prog *construct_clk_prog(struct gk20a *g, void *pargs); static u32 devinit_get_clk_prog_table(struct gk20a *g, diff --git a/drivers/gpu/nvgpu/clk/clk_vf_point.c b/drivers/gpu/nvgpu/clk/clk_vf_point.c index 3ec111f0..a53af185 100644 --- a/drivers/gpu/nvgpu/clk/clk_vf_point.c +++ b/drivers/gpu/nvgpu/clk/clk_vf_point.c @@ -11,6 +11,7 @@ * more details. */ + #include "gk20a/gk20a.h" #include "clk.h" #include "clk_vf_point.h" @@ -18,7 +19,6 @@ #include "boardobj/boardobjgrp_e32.h" #include "ctrl/ctrlclk.h" #include "ctrl/ctrlvolt.h" -#include "gk20a/pmu_gk20a.h" static u32 _clk_vf_point_pmudatainit_super(struct gk20a *g, struct boardobj *board_obj_ptr, struct nv_pmu_boardobj *ppmudata); diff --git a/drivers/gpu/nvgpu/clk/clk_vin.c b/drivers/gpu/nvgpu/clk/clk_vin.c index 0fceb89d..04f7b231 100644 --- a/drivers/gpu/nvgpu/clk/clk_vin.c +++ b/drivers/gpu/nvgpu/clk/clk_vin.c @@ -12,18 +12,16 @@ */ #include +#include #include "gk20a/gk20a.h" #include "boardobj/boardobjgrp.h" #include "boardobj/boardobjgrp_e32.h" -#include - #include "ctrl/ctrlvolt.h" #include "gm206/bios_gm206.h" -#include "gk20a/pmu_gk20a.h" #include "clk.h" #include "clk_vin.h" diff --git a/drivers/gpu/nvgpu/common/linux/debug_pmu.c b/drivers/gpu/nvgpu/common/linux/debug_pmu.c index f19f5139..918672db 100644 --- a/drivers/gpu/nvgpu/common/linux/debug_pmu.c +++ b/drivers/gpu/nvgpu/common/linux/debug_pmu.c @@ -252,7 +252,7 @@ static const struct file_operations elpg_transitions_fops = { static int falc_trace_show(struct seq_file *s, void *data) { struct gk20a *g = s->private; - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; u32 i = 0, j = 0, k, l, m; char part_str[40]; void *tracebuffer; diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h index 89b414be..37e2e185 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/gk20a.h @@ -50,6 +50,7 @@ struct gk20a_debug_output; #include #include #include +#include #include "clk_gk20a.h" #include "ce2_gk20a.h" @@ -523,33 +524,33 @@ struct gpu_ops { /*used for change of enum zbc update cmd id from ver 0 to ver1*/ u32 cmd_id_zbc_table_update; bool is_pmu_zbc_save_supported; - u32 (*get_pmu_cmdline_args_size)(struct pmu_gk20a *pmu); - void (*set_pmu_cmdline_args_cpu_freq)(struct pmu_gk20a *pmu, + u32 (*get_pmu_cmdline_args_size)(struct nvgpu_pmu *pmu); + void (*set_pmu_cmdline_args_cpu_freq)(struct nvgpu_pmu *pmu, u32 freq); - void (*set_pmu_cmdline_args_trace_size)(struct pmu_gk20a *pmu, + void (*set_pmu_cmdline_args_trace_size)(struct nvgpu_pmu *pmu, u32 size); void (*set_pmu_cmdline_args_trace_dma_base)( - struct pmu_gk20a *pmu); + struct nvgpu_pmu *pmu); void (*set_pmu_cmdline_args_trace_dma_idx)( - struct pmu_gk20a *pmu, u32 idx); - void * (*get_pmu_cmdline_args_ptr)(struct pmu_gk20a *pmu); - u32 (*get_pmu_allocation_struct_size)(struct pmu_gk20a *pmu); - void (*set_pmu_allocation_ptr)(struct pmu_gk20a *pmu, + struct nvgpu_pmu *pmu, u32 idx); + void * (*get_pmu_cmdline_args_ptr)(struct nvgpu_pmu *pmu); + u32 (*get_pmu_allocation_struct_size)(struct nvgpu_pmu *pmu); + void (*set_pmu_allocation_ptr)(struct nvgpu_pmu *pmu, void 
**pmu_alloc_ptr, void *assign_ptr); - void (*pmu_allocation_set_dmem_size)(struct pmu_gk20a *pmu, + void (*pmu_allocation_set_dmem_size)(struct nvgpu_pmu *pmu, void *pmu_alloc_ptr, u16 size); - u16 (*pmu_allocation_get_dmem_size)(struct pmu_gk20a *pmu, + u16 (*pmu_allocation_get_dmem_size)(struct nvgpu_pmu *pmu, void *pmu_alloc_ptr); - u32 (*pmu_allocation_get_dmem_offset)(struct pmu_gk20a *pmu, + u32 (*pmu_allocation_get_dmem_offset)(struct nvgpu_pmu *pmu, void *pmu_alloc_ptr); u32 * (*pmu_allocation_get_dmem_offset_addr)( - struct pmu_gk20a *pmu, void *pmu_alloc_ptr); - void (*pmu_allocation_set_dmem_offset)(struct pmu_gk20a *pmu, + struct nvgpu_pmu *pmu, void *pmu_alloc_ptr); + void (*pmu_allocation_set_dmem_offset)(struct nvgpu_pmu *pmu, void *pmu_alloc_ptr, u32 offset); void * (*pmu_allocation_get_fb_addr)( - struct pmu_gk20a *pmu, void *pmu_alloc_ptr); + struct nvgpu_pmu *pmu, void *pmu_alloc_ptr); u32 (*pmu_allocation_get_fb_size)( - struct pmu_gk20a *pmu, void *pmu_alloc_ptr); + struct nvgpu_pmu *pmu, void *pmu_alloc_ptr); void (*get_pmu_init_msg_pmu_queue_params)( struct pmu_queue *queue, u32 id, void *pmu_init_msg); @@ -590,15 +591,15 @@ struct gpu_ops { struct pmu_sequence *seq); void *(*get_pmu_seq_out_a_ptr)( struct pmu_sequence *seq); - void (*set_pmu_cmdline_args_secure_mode)(struct pmu_gk20a *pmu, + void (*set_pmu_cmdline_args_secure_mode)(struct nvgpu_pmu *pmu, u32 val); - u32 (*get_perfmon_cntr_sz)(struct pmu_gk20a *pmu); - void * (*get_perfmon_cntr_ptr)(struct pmu_gk20a *pmu); - void (*set_perfmon_cntr_ut)(struct pmu_gk20a *pmu, u16 ut); - void (*set_perfmon_cntr_lt)(struct pmu_gk20a *pmu, u16 lt); - void (*set_perfmon_cntr_valid)(struct pmu_gk20a *pmu, u8 val); - void (*set_perfmon_cntr_index)(struct pmu_gk20a *pmu, u8 val); - void (*set_perfmon_cntr_group_id)(struct pmu_gk20a *pmu, + u32 (*get_perfmon_cntr_sz)(struct nvgpu_pmu *pmu); + void * (*get_perfmon_cntr_ptr)(struct nvgpu_pmu *pmu); + void (*set_perfmon_cntr_ut)(struct nvgpu_pmu *pmu, u16 ut); + void (*set_perfmon_cntr_lt)(struct nvgpu_pmu *pmu, u16 lt); + void (*set_perfmon_cntr_valid)(struct nvgpu_pmu *pmu, u8 val); + void (*set_perfmon_cntr_index)(struct nvgpu_pmu *pmu, u8 val); + void (*set_perfmon_cntr_group_id)(struct nvgpu_pmu *pmu, u8 gid); u8 (*pg_cmd_eng_buf_load_size)(struct pmu_pg_cmd *pg); @@ -728,7 +729,7 @@ struct gpu_ops { bool (*is_pmu_supported)(struct gk20a *g); int (*prepare_ucode)(struct gk20a *g); int (*pmu_setup_hw_and_bootstrap)(struct gk20a *g); - int (*pmu_nsbootstrap)(struct pmu_gk20a *pmu); + int (*pmu_nsbootstrap)(struct nvgpu_pmu *pmu); int (*pmu_setup_elpg)(struct gk20a *g); u32 (*pmu_get_queue_head)(u32 i); u32 (*pmu_get_queue_head_size)(void); @@ -1014,7 +1015,7 @@ struct gk20a { struct gr_gk20a gr; struct sim_gk20a sim; struct mm_gk20a mm; - struct pmu_gk20a pmu; + struct nvgpu_pmu pmu; struct acr_desc acr; struct ecc_gk20a ecc; struct cooling_device_gk20a gk20a_cdev; @@ -1396,7 +1397,7 @@ static inline struct gk20a *gk20a_from_as(struct gk20a_as *as) { return container_of(as, struct gk20a, as); } -static inline struct gk20a *gk20a_from_pmu(struct pmu_gk20a *pmu) +static inline struct gk20a *gk20a_from_pmu(struct nvgpu_pmu *pmu) { return container_of(pmu, struct gk20a, pmu); } diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_scale.c b/drivers/gpu/nvgpu/gk20a/gk20a_scale.c index 608b2398..c23cdcba 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a_scale.c +++ b/drivers/gpu/nvgpu/gk20a/gk20a_scale.c @@ -24,15 +24,13 @@ #include #include +#include #include "gk20a.h" #include "platform_gk20a.h" 
-#include "pmu_gk20a.h" #include "clk_gk20a.h" #include "gk20a_scale.h" -#include - /* * gk20a_scale_qos_notify() * diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c b/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c index 4bb8304a..1065968b 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c +++ b/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c @@ -491,7 +491,7 @@ static ssize_t mscg_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct gk20a *g = get_gk20a(dev); - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; unsigned long val = 0; int err; diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c index 3b46b807..2cf55119 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c @@ -5193,7 +5193,7 @@ clean_up: static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct mm_gk20a *mm = &g->mm; struct vm_gk20a *vm = &mm->pmu.vm; int err = 0; diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c index fc46db91..e74a5264 100644 --- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c @@ -56,171 +56,171 @@ static void nvgpu_pmu_state_change(struct gk20a *g, u32 pmu_state, static int pmu_init_powergating(struct gk20a *g); -static u32 pmu_perfmon_cntr_sz_v0(struct pmu_gk20a *pmu) +static u32 pmu_perfmon_cntr_sz_v0(struct nvgpu_pmu *pmu) { return sizeof(struct pmu_perfmon_counter_v0); } -static u32 pmu_perfmon_cntr_sz_v2(struct pmu_gk20a *pmu) +static u32 pmu_perfmon_cntr_sz_v2(struct nvgpu_pmu *pmu) { return sizeof(struct pmu_perfmon_counter_v2); } -static void *get_perfmon_cntr_ptr_v2(struct pmu_gk20a *pmu) +static void *get_perfmon_cntr_ptr_v2(struct nvgpu_pmu *pmu) { return (void *)(&pmu->perfmon_counter_v2); } -static void *get_perfmon_cntr_ptr_v0(struct pmu_gk20a *pmu) +static void *get_perfmon_cntr_ptr_v0(struct nvgpu_pmu *pmu) { return (void *)(&pmu->perfmon_counter_v0); } -static void set_perfmon_cntr_ut_v2(struct pmu_gk20a *pmu, u16 ut) +static void set_perfmon_cntr_ut_v2(struct nvgpu_pmu *pmu, u16 ut) { pmu->perfmon_counter_v2.upper_threshold = ut; } -static void set_perfmon_cntr_ut_v0(struct pmu_gk20a *pmu, u16 ut) +static void set_perfmon_cntr_ut_v0(struct nvgpu_pmu *pmu, u16 ut) { pmu->perfmon_counter_v0.upper_threshold = ut; } -static void set_perfmon_cntr_lt_v2(struct pmu_gk20a *pmu, u16 lt) +static void set_perfmon_cntr_lt_v2(struct nvgpu_pmu *pmu, u16 lt) { pmu->perfmon_counter_v2.lower_threshold = lt; } -static void set_perfmon_cntr_lt_v0(struct pmu_gk20a *pmu, u16 lt) +static void set_perfmon_cntr_lt_v0(struct nvgpu_pmu *pmu, u16 lt) { pmu->perfmon_counter_v0.lower_threshold = lt; } -static void set_perfmon_cntr_valid_v2(struct pmu_gk20a *pmu, u8 valid) +static void set_perfmon_cntr_valid_v2(struct nvgpu_pmu *pmu, u8 valid) { pmu->perfmon_counter_v2.valid = valid; } -static void set_perfmon_cntr_valid_v0(struct pmu_gk20a *pmu, u8 valid) +static void set_perfmon_cntr_valid_v0(struct nvgpu_pmu *pmu, u8 valid) { pmu->perfmon_counter_v0.valid = valid; } -static void set_perfmon_cntr_index_v2(struct pmu_gk20a *pmu, u8 index) +static void set_perfmon_cntr_index_v2(struct nvgpu_pmu *pmu, u8 index) { pmu->perfmon_counter_v2.index = index; } -static void set_perfmon_cntr_index_v0(struct pmu_gk20a *pmu, u8 index) +static void set_perfmon_cntr_index_v0(struct nvgpu_pmu *pmu, u8 index) { pmu->perfmon_counter_v0.index = index; } -static void 
set_perfmon_cntr_group_id_v2(struct pmu_gk20a *pmu, u8 gid) +static void set_perfmon_cntr_group_id_v2(struct nvgpu_pmu *pmu, u8 gid) { pmu->perfmon_counter_v2.group_id = gid; } -static void set_perfmon_cntr_group_id_v0(struct pmu_gk20a *pmu, u8 gid) +static void set_perfmon_cntr_group_id_v0(struct nvgpu_pmu *pmu, u8 gid) { pmu->perfmon_counter_v0.group_id = gid; } -static u32 pmu_cmdline_size_v0(struct pmu_gk20a *pmu) +static u32 pmu_cmdline_size_v0(struct nvgpu_pmu *pmu) { return sizeof(struct pmu_cmdline_args_v0); } -static u32 pmu_cmdline_size_v1(struct pmu_gk20a *pmu) +static u32 pmu_cmdline_size_v1(struct nvgpu_pmu *pmu) { return sizeof(struct pmu_cmdline_args_v1); } -static u32 pmu_cmdline_size_v2(struct pmu_gk20a *pmu) +static u32 pmu_cmdline_size_v2(struct nvgpu_pmu *pmu) { return sizeof(struct pmu_cmdline_args_v2); } -static void set_pmu_cmdline_args_cpufreq_v2(struct pmu_gk20a *pmu, u32 freq) +static void set_pmu_cmdline_args_cpufreq_v2(struct nvgpu_pmu *pmu, u32 freq) { pmu->args_v2.cpu_freq_hz = freq; } -static void set_pmu_cmdline_args_secure_mode_v2(struct pmu_gk20a *pmu, u32 val) +static void set_pmu_cmdline_args_secure_mode_v2(struct nvgpu_pmu *pmu, u32 val) { pmu->args_v2.secure_mode = val; } static void set_pmu_cmdline_args_falctracesize_v2( - struct pmu_gk20a *pmu, u32 size) + struct nvgpu_pmu *pmu, u32 size) { pmu->args_v2.falc_trace_size = size; } -static void set_pmu_cmdline_args_falctracedmabase_v2(struct pmu_gk20a *pmu) +static void set_pmu_cmdline_args_falctracedmabase_v2(struct nvgpu_pmu *pmu) { pmu->args_v2.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; } static void set_pmu_cmdline_args_falctracedmaidx_v2( - struct pmu_gk20a *pmu, u32 idx) + struct nvgpu_pmu *pmu, u32 idx) { pmu->args_v2.falc_trace_dma_idx = idx; } -static void set_pmu_cmdline_args_falctracedmabase_v4(struct pmu_gk20a *pmu) +static void set_pmu_cmdline_args_falctracedmabase_v4(struct nvgpu_pmu *pmu) { pmu->args_v4.dma_addr.dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; pmu->args_v4.dma_addr.dma_base1 = 0; pmu->args_v4.dma_addr.dma_offset = 0; } -static u32 pmu_cmdline_size_v4(struct pmu_gk20a *pmu) +static u32 pmu_cmdline_size_v4(struct nvgpu_pmu *pmu) { return sizeof(struct pmu_cmdline_args_v4); } -static void set_pmu_cmdline_args_cpufreq_v4(struct pmu_gk20a *pmu, u32 freq) +static void set_pmu_cmdline_args_cpufreq_v4(struct nvgpu_pmu *pmu, u32 freq) { pmu->args_v4.cpu_freq_hz = freq; } -static void set_pmu_cmdline_args_secure_mode_v4(struct pmu_gk20a *pmu, u32 val) +static void set_pmu_cmdline_args_secure_mode_v4(struct nvgpu_pmu *pmu, u32 val) { pmu->args_v4.secure_mode = val; } static void set_pmu_cmdline_args_falctracesize_v4( - struct pmu_gk20a *pmu, u32 size) + struct nvgpu_pmu *pmu, u32 size) { pmu->args_v4.falc_trace_size = size; } static void set_pmu_cmdline_args_falctracedmaidx_v4( - struct pmu_gk20a *pmu, u32 idx) + struct nvgpu_pmu *pmu, u32 idx) { pmu->args_v4.falc_trace_dma_idx = idx; } -static u32 pmu_cmdline_size_v5(struct pmu_gk20a *pmu) +static u32 pmu_cmdline_size_v5(struct nvgpu_pmu *pmu) { return sizeof(struct pmu_cmdline_args_v5); } -static void set_pmu_cmdline_args_cpufreq_v5(struct pmu_gk20a *pmu, u32 freq) +static void set_pmu_cmdline_args_cpufreq_v5(struct nvgpu_pmu *pmu, u32 freq) { pmu->args_v5.cpu_freq_hz = 204000000; } -static void set_pmu_cmdline_args_secure_mode_v5(struct pmu_gk20a *pmu, u32 val) +static void set_pmu_cmdline_args_secure_mode_v5(struct nvgpu_pmu *pmu, u32 val) { pmu->args_v5.secure_mode = val; } static void 
set_pmu_cmdline_args_falctracesize_v5( - struct pmu_gk20a *pmu, u32 size) + struct nvgpu_pmu *pmu, u32 size) { /* set by surface describe */ } -static void set_pmu_cmdline_args_falctracedmabase_v5(struct pmu_gk20a *pmu) +static void set_pmu_cmdline_args_falctracedmabase_v5(struct nvgpu_pmu *pmu) { struct gk20a *g = gk20a_from_pmu(pmu); @@ -228,53 +228,53 @@ static void set_pmu_cmdline_args_falctracedmabase_v5(struct pmu_gk20a *pmu) } static void set_pmu_cmdline_args_falctracedmaidx_v5( - struct pmu_gk20a *pmu, u32 idx) + struct nvgpu_pmu *pmu, u32 idx) { /* set by surface describe */ } -static u32 pmu_cmdline_size_v3(struct pmu_gk20a *pmu) +static u32 pmu_cmdline_size_v3(struct nvgpu_pmu *pmu) { return sizeof(struct pmu_cmdline_args_v3); } -static void set_pmu_cmdline_args_cpufreq_v3(struct pmu_gk20a *pmu, u32 freq) +static void set_pmu_cmdline_args_cpufreq_v3(struct nvgpu_pmu *pmu, u32 freq) { pmu->args_v3.cpu_freq_hz = freq; } -static void set_pmu_cmdline_args_secure_mode_v3(struct pmu_gk20a *pmu, u32 val) +static void set_pmu_cmdline_args_secure_mode_v3(struct nvgpu_pmu *pmu, u32 val) { pmu->args_v3.secure_mode = val; } static void set_pmu_cmdline_args_falctracesize_v3( - struct pmu_gk20a *pmu, u32 size) + struct nvgpu_pmu *pmu, u32 size) { pmu->args_v3.falc_trace_size = size; } -static void set_pmu_cmdline_args_falctracedmabase_v3(struct pmu_gk20a *pmu) +static void set_pmu_cmdline_args_falctracedmabase_v3(struct nvgpu_pmu *pmu) { pmu->args_v3.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; } static void set_pmu_cmdline_args_falctracedmaidx_v3( - struct pmu_gk20a *pmu, u32 idx) + struct nvgpu_pmu *pmu, u32 idx) { pmu->args_v3.falc_trace_dma_idx = idx; } -static void set_pmu_cmdline_args_cpufreq_v1(struct pmu_gk20a *pmu, u32 freq) +static void set_pmu_cmdline_args_cpufreq_v1(struct nvgpu_pmu *pmu, u32 freq) { pmu->args_v1.cpu_freq_hz = freq; } -static void set_pmu_cmdline_args_secure_mode_v1(struct pmu_gk20a *pmu, u32 val) +static void set_pmu_cmdline_args_secure_mode_v1(struct nvgpu_pmu *pmu, u32 val) { pmu->args_v1.secure_mode = val; } static void set_pmu_cmdline_args_falctracesize_v1( - struct pmu_gk20a *pmu, u32 size) + struct nvgpu_pmu *pmu, u32 size) { pmu->args_v1.falc_trace_size = size; } @@ -293,7 +293,7 @@ bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos) return false; } -static void printtrace(struct pmu_gk20a *pmu) +static void printtrace(struct nvgpu_pmu *pmu) { u32 i = 0, j = 0, k, l, m, count; char part_str[40], buf[0x40]; @@ -340,72 +340,72 @@ static void printtrace(struct pmu_gk20a *pmu) nvgpu_kfree(g, tracebuffer); } -static void set_pmu_cmdline_args_falctracedmabase_v1(struct pmu_gk20a *pmu) +static void set_pmu_cmdline_args_falctracedmabase_v1(struct nvgpu_pmu *pmu) { pmu->args_v1.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; } static void set_pmu_cmdline_args_falctracedmaidx_v1( - struct pmu_gk20a *pmu, u32 idx) + struct nvgpu_pmu *pmu, u32 idx) { pmu->args_v1.falc_trace_dma_idx = idx; } -static void set_pmu_cmdline_args_cpufreq_v0(struct pmu_gk20a *pmu, u32 freq) +static void set_pmu_cmdline_args_cpufreq_v0(struct nvgpu_pmu *pmu, u32 freq) { pmu->args_v0.cpu_freq_hz = freq; } -static void *get_pmu_cmdline_args_ptr_v4(struct pmu_gk20a *pmu) +static void *get_pmu_cmdline_args_ptr_v4(struct nvgpu_pmu *pmu) { return (void *)(&pmu->args_v4); } -static void *get_pmu_cmdline_args_ptr_v3(struct pmu_gk20a *pmu) +static void *get_pmu_cmdline_args_ptr_v3(struct nvgpu_pmu *pmu) { return (void *)(&pmu->args_v3); } -static void 
*get_pmu_cmdline_args_ptr_v2(struct pmu_gk20a *pmu) +static void *get_pmu_cmdline_args_ptr_v2(struct nvgpu_pmu *pmu) { return (void *)(&pmu->args_v2); } -static void *get_pmu_cmdline_args_ptr_v5(struct pmu_gk20a *pmu) +static void *get_pmu_cmdline_args_ptr_v5(struct nvgpu_pmu *pmu) { return (void *)(&pmu->args_v5); } -static void *get_pmu_cmdline_args_ptr_v1(struct pmu_gk20a *pmu) +static void *get_pmu_cmdline_args_ptr_v1(struct nvgpu_pmu *pmu) { return (void *)(&pmu->args_v1); } -static void *get_pmu_cmdline_args_ptr_v0(struct pmu_gk20a *pmu) +static void *get_pmu_cmdline_args_ptr_v0(struct nvgpu_pmu *pmu) { return (void *)(&pmu->args_v0); } -static u32 get_pmu_allocation_size_v3(struct pmu_gk20a *pmu) +static u32 get_pmu_allocation_size_v3(struct nvgpu_pmu *pmu) { return sizeof(struct pmu_allocation_v3); } -static u32 get_pmu_allocation_size_v2(struct pmu_gk20a *pmu) +static u32 get_pmu_allocation_size_v2(struct nvgpu_pmu *pmu) { return sizeof(struct pmu_allocation_v2); } -static u32 get_pmu_allocation_size_v1(struct pmu_gk20a *pmu) +static u32 get_pmu_allocation_size_v1(struct nvgpu_pmu *pmu) { return sizeof(struct pmu_allocation_v1); } -static u32 get_pmu_allocation_size_v0(struct pmu_gk20a *pmu) +static u32 get_pmu_allocation_size_v0(struct nvgpu_pmu *pmu) { return sizeof(struct pmu_allocation_v0); } -static void set_pmu_allocation_ptr_v3(struct pmu_gk20a *pmu, +static void set_pmu_allocation_ptr_v3(struct nvgpu_pmu *pmu, void **pmu_alloc_ptr, void *assign_ptr) { struct pmu_allocation_v3 **pmu_a_ptr = @@ -413,7 +413,7 @@ static void set_pmu_allocation_ptr_v3(struct pmu_gk20a *pmu, *pmu_a_ptr = (struct pmu_allocation_v3 *)assign_ptr; } -static void set_pmu_allocation_ptr_v2(struct pmu_gk20a *pmu, +static void set_pmu_allocation_ptr_v2(struct nvgpu_pmu *pmu, void **pmu_alloc_ptr, void *assign_ptr) { struct pmu_allocation_v2 **pmu_a_ptr = @@ -421,7 +421,7 @@ static void set_pmu_allocation_ptr_v2(struct pmu_gk20a *pmu, *pmu_a_ptr = (struct pmu_allocation_v2 *)assign_ptr; } -static void set_pmu_allocation_ptr_v1(struct pmu_gk20a *pmu, +static void set_pmu_allocation_ptr_v1(struct nvgpu_pmu *pmu, void **pmu_alloc_ptr, void *assign_ptr) { struct pmu_allocation_v1 **pmu_a_ptr = @@ -429,7 +429,7 @@ static void set_pmu_allocation_ptr_v1(struct pmu_gk20a *pmu, *pmu_a_ptr = (struct pmu_allocation_v1 *)assign_ptr; } -static void set_pmu_allocation_ptr_v0(struct pmu_gk20a *pmu, +static void set_pmu_allocation_ptr_v0(struct nvgpu_pmu *pmu, void **pmu_alloc_ptr, void *assign_ptr) { struct pmu_allocation_v0 **pmu_a_ptr = @@ -437,7 +437,7 @@ static void set_pmu_allocation_ptr_v0(struct pmu_gk20a *pmu, *pmu_a_ptr = (struct pmu_allocation_v0 *)assign_ptr; } -static void pmu_allocation_set_dmem_size_v3(struct pmu_gk20a *pmu, +static void pmu_allocation_set_dmem_size_v3(struct nvgpu_pmu *pmu, void *pmu_alloc_ptr, u16 size) { struct pmu_allocation_v3 *pmu_a_ptr = @@ -445,7 +445,7 @@ static void pmu_allocation_set_dmem_size_v3(struct pmu_gk20a *pmu, pmu_a_ptr->alloc.dmem.size = size; } -static void pmu_allocation_set_dmem_size_v2(struct pmu_gk20a *pmu, +static void pmu_allocation_set_dmem_size_v2(struct nvgpu_pmu *pmu, void *pmu_alloc_ptr, u16 size) { struct pmu_allocation_v2 *pmu_a_ptr = @@ -453,7 +453,7 @@ static void pmu_allocation_set_dmem_size_v2(struct pmu_gk20a *pmu, pmu_a_ptr->alloc.dmem.size = size; } -static void pmu_allocation_set_dmem_size_v1(struct pmu_gk20a *pmu, +static void pmu_allocation_set_dmem_size_v1(struct nvgpu_pmu *pmu, void *pmu_alloc_ptr, u16 size) { struct pmu_allocation_v1 
*pmu_a_ptr = @@ -461,7 +461,7 @@ static void pmu_allocation_set_dmem_size_v1(struct pmu_gk20a *pmu, pmu_a_ptr->alloc.dmem.size = size; } -static void pmu_allocation_set_dmem_size_v0(struct pmu_gk20a *pmu, +static void pmu_allocation_set_dmem_size_v0(struct nvgpu_pmu *pmu, void *pmu_alloc_ptr, u16 size) { struct pmu_allocation_v0 *pmu_a_ptr = @@ -469,7 +469,7 @@ static void pmu_allocation_set_dmem_size_v0(struct pmu_gk20a *pmu, pmu_a_ptr->alloc.dmem.size = size; } -static u16 pmu_allocation_get_dmem_size_v3(struct pmu_gk20a *pmu, +static u16 pmu_allocation_get_dmem_size_v3(struct nvgpu_pmu *pmu, void *pmu_alloc_ptr) { struct pmu_allocation_v3 *pmu_a_ptr = @@ -477,7 +477,7 @@ static u16 pmu_allocation_get_dmem_size_v3(struct pmu_gk20a *pmu, return pmu_a_ptr->alloc.dmem.size; } -static u16 pmu_allocation_get_dmem_size_v2(struct pmu_gk20a *pmu, +static u16 pmu_allocation_get_dmem_size_v2(struct nvgpu_pmu *pmu, void *pmu_alloc_ptr) { struct pmu_allocation_v2 *pmu_a_ptr = @@ -485,7 +485,7 @@ static u16 pmu_allocation_get_dmem_size_v2(struct pmu_gk20a *pmu, return pmu_a_ptr->alloc.dmem.size; } -static u16 pmu_allocation_get_dmem_size_v1(struct pmu_gk20a *pmu, +static u16 pmu_allocation_get_dmem_size_v1(struct nvgpu_pmu *pmu, void *pmu_alloc_ptr) { struct pmu_allocation_v1 *pmu_a_ptr = @@ -493,7 +493,7 @@ static u16 pmu_allocation_get_dmem_size_v1(struct pmu_gk20a *pmu, return pmu_a_ptr->alloc.dmem.size; } -static u16 pmu_allocation_get_dmem_size_v0(struct pmu_gk20a *pmu, +static u16 pmu_allocation_get_dmem_size_v0(struct nvgpu_pmu *pmu, void *pmu_alloc_ptr) { struct pmu_allocation_v0 *pmu_a_ptr = @@ -501,7 +501,7 @@ static u16 pmu_allocation_get_dmem_size_v0(struct pmu_gk20a *pmu, return pmu_a_ptr->alloc.dmem.size; } -static u32 pmu_allocation_get_dmem_offset_v3(struct pmu_gk20a *pmu, +static u32 pmu_allocation_get_dmem_offset_v3(struct nvgpu_pmu *pmu, void *pmu_alloc_ptr) { struct pmu_allocation_v3 *pmu_a_ptr = @@ -509,7 +509,7 @@ static u32 pmu_allocation_get_dmem_offset_v3(struct pmu_gk20a *pmu, return pmu_a_ptr->alloc.dmem.offset; } -static u32 pmu_allocation_get_dmem_offset_v2(struct pmu_gk20a *pmu, +static u32 pmu_allocation_get_dmem_offset_v2(struct nvgpu_pmu *pmu, void *pmu_alloc_ptr) { struct pmu_allocation_v2 *pmu_a_ptr = @@ -517,7 +517,7 @@ static u32 pmu_allocation_get_dmem_offset_v2(struct pmu_gk20a *pmu, return pmu_a_ptr->alloc.dmem.offset; } -static u32 pmu_allocation_get_dmem_offset_v1(struct pmu_gk20a *pmu, +static u32 pmu_allocation_get_dmem_offset_v1(struct nvgpu_pmu *pmu, void *pmu_alloc_ptr) { struct pmu_allocation_v1 *pmu_a_ptr = @@ -525,7 +525,7 @@ static u32 pmu_allocation_get_dmem_offset_v1(struct pmu_gk20a *pmu, return pmu_a_ptr->alloc.dmem.offset; } -static u32 pmu_allocation_get_dmem_offset_v0(struct pmu_gk20a *pmu, +static u32 pmu_allocation_get_dmem_offset_v0(struct nvgpu_pmu *pmu, void *pmu_alloc_ptr) { struct pmu_allocation_v0 *pmu_a_ptr = @@ -533,7 +533,7 @@ static u32 pmu_allocation_get_dmem_offset_v0(struct pmu_gk20a *pmu, return pmu_a_ptr->alloc.dmem.offset; } -static u32 *pmu_allocation_get_dmem_offset_addr_v3(struct pmu_gk20a *pmu, +static u32 *pmu_allocation_get_dmem_offset_addr_v3(struct nvgpu_pmu *pmu, void *pmu_alloc_ptr) { struct pmu_allocation_v3 *pmu_a_ptr = @@ -542,7 +542,7 @@ static u32 *pmu_allocation_get_dmem_offset_addr_v3(struct pmu_gk20a *pmu, } static void *pmu_allocation_get_fb_addr_v3( - struct pmu_gk20a *pmu, void *pmu_alloc_ptr) + struct nvgpu_pmu *pmu, void *pmu_alloc_ptr) { struct pmu_allocation_v3 *pmu_a_ptr = (struct pmu_allocation_v3 
*)pmu_alloc_ptr; @@ -550,14 +550,14 @@ static void *pmu_allocation_get_fb_addr_v3( } static u32 pmu_allocation_get_fb_size_v3( - struct pmu_gk20a *pmu, void *pmu_alloc_ptr) + struct nvgpu_pmu *pmu, void *pmu_alloc_ptr) { struct pmu_allocation_v3 *pmu_a_ptr = (struct pmu_allocation_v3 *)pmu_alloc_ptr; return sizeof(pmu_a_ptr->alloc.fb); } -static u32 *pmu_allocation_get_dmem_offset_addr_v2(struct pmu_gk20a *pmu, +static u32 *pmu_allocation_get_dmem_offset_addr_v2(struct nvgpu_pmu *pmu, void *pmu_alloc_ptr) { struct pmu_allocation_v2 *pmu_a_ptr = @@ -565,7 +565,7 @@ static u32 *pmu_allocation_get_dmem_offset_addr_v2(struct pmu_gk20a *pmu, return &pmu_a_ptr->alloc.dmem.offset; } -static u32 *pmu_allocation_get_dmem_offset_addr_v1(struct pmu_gk20a *pmu, +static u32 *pmu_allocation_get_dmem_offset_addr_v1(struct nvgpu_pmu *pmu, void *pmu_alloc_ptr) { struct pmu_allocation_v1 *pmu_a_ptr = @@ -573,7 +573,7 @@ static u32 *pmu_allocation_get_dmem_offset_addr_v1(struct pmu_gk20a *pmu, return &pmu_a_ptr->alloc.dmem.offset; } -static u32 *pmu_allocation_get_dmem_offset_addr_v0(struct pmu_gk20a *pmu, +static u32 *pmu_allocation_get_dmem_offset_addr_v0(struct nvgpu_pmu *pmu, void *pmu_alloc_ptr) { struct pmu_allocation_v0 *pmu_a_ptr = @@ -581,7 +581,7 @@ static u32 *pmu_allocation_get_dmem_offset_addr_v0(struct pmu_gk20a *pmu, return &pmu_a_ptr->alloc.dmem.offset; } -static void pmu_allocation_set_dmem_offset_v3(struct pmu_gk20a *pmu, +static void pmu_allocation_set_dmem_offset_v3(struct nvgpu_pmu *pmu, void *pmu_alloc_ptr, u32 offset) { struct pmu_allocation_v3 *pmu_a_ptr = @@ -589,7 +589,7 @@ static void pmu_allocation_set_dmem_offset_v3(struct pmu_gk20a *pmu, pmu_a_ptr->alloc.dmem.offset = offset; } -static void pmu_allocation_set_dmem_offset_v2(struct pmu_gk20a *pmu, +static void pmu_allocation_set_dmem_offset_v2(struct nvgpu_pmu *pmu, void *pmu_alloc_ptr, u32 offset) { struct pmu_allocation_v2 *pmu_a_ptr = @@ -597,7 +597,7 @@ static void pmu_allocation_set_dmem_offset_v2(struct pmu_gk20a *pmu, pmu_a_ptr->alloc.dmem.offset = offset; } -static void pmu_allocation_set_dmem_offset_v1(struct pmu_gk20a *pmu, +static void pmu_allocation_set_dmem_offset_v1(struct nvgpu_pmu *pmu, void *pmu_alloc_ptr, u32 offset) { struct pmu_allocation_v1 *pmu_a_ptr = @@ -605,7 +605,7 @@ static void pmu_allocation_set_dmem_offset_v1(struct pmu_gk20a *pmu, pmu_a_ptr->alloc.dmem.offset = offset; } -static void pmu_allocation_set_dmem_offset_v0(struct pmu_gk20a *pmu, +static void pmu_allocation_set_dmem_offset_v0(struct nvgpu_pmu *pmu, void *pmu_alloc_ptr, u32 offset) { struct pmu_allocation_v0 *pmu_a_ptr = @@ -1421,7 +1421,7 @@ static void pg_cmd_eng_buf_load_set_dma_idx_v2(struct pmu_pg_cmd *pg, pg->eng_buf_load_v2.dma_desc.params |= (value << 24); } -int gk20a_init_pmu(struct pmu_gk20a *pmu) +int gk20a_init_pmu(struct nvgpu_pmu *pmu) { struct gk20a *g = gk20a_from_pmu(pmu); struct pmu_v *pv = &g->ops.pmu_ver; @@ -2214,7 +2214,7 @@ fail_elpg: return err; } -void pmu_copy_from_dmem(struct pmu_gk20a *pmu, +void pmu_copy_from_dmem(struct nvgpu_pmu *pmu, u32 src, u8 *dst, u32 size, u8 port) { struct gk20a *g = gk20a_from_pmu(pmu); @@ -2258,7 +2258,7 @@ void pmu_copy_from_dmem(struct pmu_gk20a *pmu, return; } -void pmu_copy_to_dmem(struct pmu_gk20a *pmu, +void pmu_copy_to_dmem(struct nvgpu_pmu *pmu, u32 dst, u8 *src, u32 size, u8 port) { struct gk20a *g = gk20a_from_pmu(pmu); @@ -2309,7 +2309,7 @@ void pmu_copy_to_dmem(struct pmu_gk20a *pmu, return; } -int pmu_idle(struct pmu_gk20a *pmu) +int pmu_idle(struct nvgpu_pmu *pmu) { 
struct gk20a *g = gk20a_from_pmu(pmu); struct nvgpu_timeout timeout; @@ -2338,7 +2338,7 @@ int pmu_idle(struct pmu_gk20a *pmu) return 0; } -void pmu_enable_irq(struct pmu_gk20a *pmu, bool enable) +void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable) { struct gk20a *g = gk20a_from_pmu(pmu); @@ -2400,7 +2400,7 @@ void pmu_enable_irq(struct pmu_gk20a *pmu, bool enable) gk20a_dbg_fn("done"); } -int pmu_enable_hw(struct pmu_gk20a *pmu, bool enable) +int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable) { struct gk20a *g = gk20a_from_pmu(pmu); struct nvgpu_timeout timeout; @@ -2443,7 +2443,7 @@ int pmu_enable_hw(struct pmu_gk20a *pmu, bool enable) } } -static int pmu_enable(struct pmu_gk20a *pmu, bool enable) +static int pmu_enable(struct nvgpu_pmu *pmu, bool enable) { struct gk20a *g = gk20a_from_pmu(pmu); u32 pmc_enable; @@ -2477,7 +2477,7 @@ static int pmu_enable(struct pmu_gk20a *pmu, bool enable) return 0; } -int pmu_reset(struct pmu_gk20a *pmu) +int pmu_reset(struct nvgpu_pmu *pmu) { int err; @@ -2502,7 +2502,7 @@ int pmu_reset(struct pmu_gk20a *pmu) return 0; } -int pmu_bootstrap(struct pmu_gk20a *pmu) +int pmu_bootstrap(struct nvgpu_pmu *pmu) { struct gk20a *g = gk20a_from_pmu(pmu); struct mm_gk20a *mm = &g->mm; @@ -2593,7 +2593,7 @@ int pmu_bootstrap(struct pmu_gk20a *pmu) return 0; } -void pmu_seq_init(struct pmu_gk20a *pmu) +void pmu_seq_init(struct nvgpu_pmu *pmu) { u32 i; @@ -2606,7 +2606,7 @@ void pmu_seq_init(struct pmu_gk20a *pmu) pmu->seq[i].id = i; } -static int pmu_seq_acquire(struct pmu_gk20a *pmu, +static int pmu_seq_acquire(struct nvgpu_pmu *pmu, struct pmu_sequence **pseq) { struct gk20a *g = gk20a_from_pmu(pmu); @@ -2631,7 +2631,7 @@ static int pmu_seq_acquire(struct pmu_gk20a *pmu, return 0; } -static void pmu_seq_release(struct pmu_gk20a *pmu, +static void pmu_seq_release(struct nvgpu_pmu *pmu, struct pmu_sequence *seq) { struct gk20a *g = gk20a_from_pmu(pmu); @@ -2649,7 +2649,7 @@ static void pmu_seq_release(struct pmu_gk20a *pmu, clear_bit(seq->id, pmu->pmu_seq_tbl); } -static int pmu_queue_init(struct pmu_gk20a *pmu, +static int pmu_queue_init(struct nvgpu_pmu *pmu, u32 id, union pmu_init_msg_pmu *init) { struct gk20a *g = gk20a_from_pmu(pmu); @@ -2670,7 +2670,7 @@ static int pmu_queue_init(struct pmu_gk20a *pmu, return 0; } -static int pmu_queue_head(struct pmu_gk20a *pmu, struct pmu_queue *queue, +static int pmu_queue_head(struct nvgpu_pmu *pmu, struct pmu_queue *queue, u32 *head, bool set) { struct gk20a *g = gk20a_from_pmu(pmu); @@ -2707,7 +2707,7 @@ static int pmu_queue_head(struct pmu_gk20a *pmu, struct pmu_queue *queue, return 0; } -static int pmu_queue_tail(struct pmu_gk20a *pmu, struct pmu_queue *queue, +static int pmu_queue_tail(struct nvgpu_pmu *pmu, struct pmu_queue *queue, u32 *tail, bool set) { struct gk20a *g = gk20a_from_pmu(pmu); @@ -2745,19 +2745,19 @@ static int pmu_queue_tail(struct pmu_gk20a *pmu, struct pmu_queue *queue, return 0; } -static inline void pmu_queue_read(struct pmu_gk20a *pmu, +static inline void pmu_queue_read(struct nvgpu_pmu *pmu, u32 offset, u8 *dst, u32 size) { pmu_copy_from_dmem(pmu, offset, dst, size, 0); } -static inline void pmu_queue_write(struct pmu_gk20a *pmu, +static inline void pmu_queue_write(struct nvgpu_pmu *pmu, u32 offset, u8 *src, u32 size) { pmu_copy_to_dmem(pmu, offset, src, size, 0); } -int pmu_mutex_acquire(struct pmu_gk20a *pmu, u32 id, u32 *token) +int pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token) { struct gk20a *g = gk20a_from_pmu(pmu); struct pmu_mutex *mutex; @@ -2826,7 +2826,7 @@ int 
pmu_mutex_acquire(struct pmu_gk20a *pmu, u32 id, u32 *token) return -EBUSY; } -int pmu_mutex_release(struct pmu_gk20a *pmu, u32 id, u32 *token) +int pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token) { struct gk20a *g = gk20a_from_pmu(pmu); struct pmu_mutex *mutex; @@ -2867,7 +2867,7 @@ int pmu_mutex_release(struct pmu_gk20a *pmu, u32 id, u32 *token) return 0; } -static int pmu_queue_lock(struct pmu_gk20a *pmu, +static int pmu_queue_lock(struct nvgpu_pmu *pmu, struct pmu_queue *queue) { int err; @@ -2884,7 +2884,7 @@ static int pmu_queue_lock(struct pmu_gk20a *pmu, return err; } -static int pmu_queue_unlock(struct pmu_gk20a *pmu, +static int pmu_queue_unlock(struct nvgpu_pmu *pmu, struct pmu_queue *queue) { int err; @@ -2902,7 +2902,7 @@ static int pmu_queue_unlock(struct pmu_gk20a *pmu, } /* called by pmu_read_message, no lock */ -static bool pmu_queue_is_empty(struct pmu_gk20a *pmu, +static bool pmu_queue_is_empty(struct nvgpu_pmu *pmu, struct pmu_queue *queue) { u32 head, tail; @@ -2916,7 +2916,7 @@ static bool pmu_queue_is_empty(struct pmu_gk20a *pmu, return head == tail; } -static bool pmu_queue_has_room(struct pmu_gk20a *pmu, +static bool pmu_queue_has_room(struct nvgpu_pmu *pmu, struct pmu_queue *queue, u32 size, bool *need_rewind) { u32 head, tail; @@ -2946,7 +2946,7 @@ static bool pmu_queue_has_room(struct pmu_gk20a *pmu, return size <= free; } -static int pmu_queue_push(struct pmu_gk20a *pmu, +static int pmu_queue_push(struct nvgpu_pmu *pmu, struct pmu_queue *queue, void *data, u32 size) { @@ -2962,7 +2962,7 @@ static int pmu_queue_push(struct pmu_gk20a *pmu, return 0; } -static int pmu_queue_pop(struct pmu_gk20a *pmu, +static int pmu_queue_pop(struct nvgpu_pmu *pmu, struct pmu_queue *queue, void *data, u32 size, u32 *bytes_read) { @@ -2998,7 +2998,7 @@ static int pmu_queue_pop(struct pmu_gk20a *pmu, return 0; } -static void pmu_queue_rewind(struct pmu_gk20a *pmu, +static void pmu_queue_rewind(struct nvgpu_pmu *pmu, struct pmu_queue *queue) { struct pmu_cmd cmd; @@ -3022,7 +3022,7 @@ static void pmu_queue_rewind(struct pmu_gk20a *pmu, } /* open for read and lock the queue */ -static int pmu_queue_open_read(struct pmu_gk20a *pmu, +static int pmu_queue_open_read(struct nvgpu_pmu *pmu, struct pmu_queue *queue) { int err; @@ -3043,7 +3043,7 @@ static int pmu_queue_open_read(struct pmu_gk20a *pmu, /* open for write and lock the queue make sure there's enough free space for the write */ -static int pmu_queue_open_write(struct pmu_gk20a *pmu, +static int pmu_queue_open_write(struct nvgpu_pmu *pmu, struct pmu_queue *queue, u32 size) { bool rewind = false; @@ -3074,7 +3074,7 @@ static int pmu_queue_open_write(struct pmu_gk20a *pmu, } /* close and unlock the queue */ -static int pmu_queue_close(struct pmu_gk20a *pmu, +static int pmu_queue_close(struct nvgpu_pmu *pmu, struct pmu_queue *queue, bool commit) { if (!queue->opened) @@ -3098,7 +3098,7 @@ static int pmu_queue_close(struct pmu_gk20a *pmu, return 0; } -void gk20a_remove_pmu_support(struct pmu_gk20a *pmu) +void gk20a_remove_pmu_support(struct nvgpu_pmu *pmu) { struct gk20a *g = gk20a_from_pmu(pmu); @@ -3118,7 +3118,7 @@ void gk20a_remove_pmu_support(struct pmu_gk20a *pmu) static int gk20a_init_pmu_reset_enable_hw(struct gk20a *g) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; gk20a_dbg_fn(""); @@ -3129,7 +3129,7 @@ static int gk20a_init_pmu_reset_enable_hw(struct gk20a *g) static int gk20a_prepare_ucode(struct gk20a *g) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; int err 
= 0; struct mm_gk20a *mm = &g->mm; struct vm_gk20a *vm = &mm->pmu.vm; @@ -3168,7 +3168,7 @@ static int gk20a_prepare_ucode(struct gk20a *g) static int gk20a_init_pmu_setup_sw(struct gk20a *g) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct mm_gk20a *mm = &g->mm; struct vm_gk20a *vm = &mm->pmu.vm; unsigned int i; @@ -3266,7 +3266,7 @@ skip_init: static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg, void *param, u32 handle, u32 status) { - struct pmu_gk20a *pmu = param; + struct nvgpu_pmu *pmu = param; struct pmu_pg_msg_eng_buf_stat *eng_buf_stat = &msg->msg.pg.eng_buf_stat; gk20a_dbg_fn(""); @@ -3289,7 +3289,7 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg, static int gk20a_init_pmu_setup_hw1(struct gk20a *g) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; int err = 0; gk20a_dbg_fn(""); @@ -3327,7 +3327,7 @@ static void pmu_setup_hw_enable_elpg(struct gk20a *g); static void nvgpu_pmu_state_change(struct gk20a *g, u32 pmu_state, bool post_change_event) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; pmu->pmu_state = pmu_state; @@ -3343,7 +3343,7 @@ static void nvgpu_pmu_state_change(struct gk20a *g, u32 pmu_state, static int nvgpu_pg_init_task(void *arg) { struct gk20a *g = (struct gk20a *)arg; - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct nvgpu_pg_init *pg_init = &pmu->pg_init; u32 pmu_state = 0; @@ -3396,7 +3396,7 @@ static int nvgpu_pg_init_task(void *arg) static int nvgpu_init_task_pg_init(struct gk20a *g) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; char thread_name[64]; int err = 0; @@ -3415,7 +3415,7 @@ static int nvgpu_init_task_pg_init(struct gk20a *g) int gk20a_init_pmu_bind_fecs(struct gk20a *g) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct pmu_cmd cmd; u32 desc; int err = 0; @@ -3454,7 +3454,7 @@ int gk20a_init_pmu_bind_fecs(struct gk20a *g) static void pmu_setup_hw_load_zbc(struct gk20a *g) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct pmu_cmd cmd; u32 desc; u32 gr_engine_id; @@ -3489,7 +3489,7 @@ static void pmu_setup_hw_load_zbc(struct gk20a *g) static void pmu_setup_hw_enable_elpg(struct gk20a *g) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; /* * FIXME: To enable ELPG, we increase the PMU ext2priv timeout unit to @@ -3532,7 +3532,7 @@ static void gk20a_write_dmatrfbase(struct gk20a *g, u32 addr) int gk20a_pmu_reset(struct gk20a *g) { int err; - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; err = pmu_reset(pmu); @@ -3592,7 +3592,7 @@ void gk20a_init_pmu_ops(struct gpu_ops *gops) int gk20a_init_pmu_support(struct gk20a *g) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; u32 err; gk20a_dbg_fn(""); @@ -3621,7 +3621,7 @@ int gk20a_init_pmu_support(struct gk20a *g) static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg, void *param, u32 handle, u32 status) { - struct pmu_gk20a *pmu = param; + struct nvgpu_pmu *pmu = param; struct pmu_pg_msg_elpg_msg *elpg_msg = &msg->msg.pg.elpg_msg; gk20a_dbg_fn(""); @@ -3681,7 +3681,7 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg, static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg, void *param, u32 handle, u32 status) { - struct pmu_gk20a *pmu = param; + struct nvgpu_pmu *pmu = param; gk20a_dbg_fn(""); @@ -3704,7 +3704,7 @@ static void 
pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg, static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct pmu_cmd cmd; u32 seq; @@ -3766,7 +3766,7 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id) } static int pmu_init_powergating(struct gk20a *g) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; u32 pg_engine_id; u32 pg_engine_id_list = 0; @@ -3795,7 +3795,7 @@ static int pmu_init_powergating(struct gk20a *g) return 0; } -static u8 get_perfmon_id(struct pmu_gk20a *pmu) +static u8 get_perfmon_id(struct nvgpu_pmu *pmu) { struct gk20a *g = gk20a_from_pmu(pmu); u32 ver = g->gpu_characteristics.arch + g->gpu_characteristics.impl; @@ -3824,7 +3824,7 @@ static u8 get_perfmon_id(struct pmu_gk20a *pmu) return unit_id; } -static int pmu_init_perfmon(struct pmu_gk20a *pmu) +static int pmu_init_perfmon(struct nvgpu_pmu *pmu) { struct gk20a *g = gk20a_from_pmu(pmu); struct pmu_v *pv = &g->ops.pmu_ver; @@ -3924,7 +3924,7 @@ static int pmu_init_perfmon(struct pmu_gk20a *pmu) return 0; } -static int pmu_process_init_msg(struct pmu_gk20a *pmu, +static int pmu_process_init_msg(struct nvgpu_pmu *pmu, struct pmu_msg *msg) { struct gk20a *g = gk20a_from_pmu(pmu); @@ -4002,7 +4002,7 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu, return 0; } -static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue, +static bool pmu_read_message(struct nvgpu_pmu *pmu, struct pmu_queue *queue, struct pmu_msg *msg, int *status) { struct gk20a *g = gk20a_from_pmu(pmu); @@ -4077,7 +4077,7 @@ clean_up: return false; } -static int pmu_response_handle(struct pmu_gk20a *pmu, +static int pmu_response_handle(struct nvgpu_pmu *pmu, struct pmu_msg *msg) { struct gk20a *g = gk20a_from_pmu(pmu); @@ -4168,14 +4168,14 @@ static int pmu_response_handle(struct pmu_gk20a *pmu, static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg, void *param, u32 handle, u32 status) { - struct pmu_gk20a *pmu = param; + struct nvgpu_pmu *pmu = param; gk20a_dbg_pmu("reply ZBC_TABLE_UPDATE"); pmu->zbc_save_done = 1; } void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct pmu_cmd cmd; u32 seq; @@ -4199,7 +4199,7 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries) nvgpu_err(g, "ZBC save timeout"); } -int nvgpu_pmu_perfmon_start_sampling(struct pmu_gk20a *pmu) +int nvgpu_pmu_perfmon_start_sampling(struct nvgpu_pmu *pmu) { struct gk20a *g = gk20a_from_pmu(pmu); struct pmu_v *pv = &g->ops.pmu_ver; @@ -4243,7 +4243,7 @@ int nvgpu_pmu_perfmon_start_sampling(struct pmu_gk20a *pmu) return 0; } -int nvgpu_pmu_perfmon_stop_sampling(struct pmu_gk20a *pmu) +int nvgpu_pmu_perfmon_stop_sampling(struct nvgpu_pmu *pmu) { struct gk20a *g = gk20a_from_pmu(pmu); struct pmu_cmd cmd; @@ -4261,7 +4261,7 @@ int nvgpu_pmu_perfmon_stop_sampling(struct pmu_gk20a *pmu) return 0; } -static int pmu_handle_perfmon_event(struct pmu_gk20a *pmu, +static int pmu_handle_perfmon_event(struct nvgpu_pmu *pmu, struct pmu_perfmon_msg *msg) { gk20a_dbg_fn(""); @@ -4294,7 +4294,7 @@ static int pmu_handle_perfmon_event(struct pmu_gk20a *pmu, } -static int pmu_handle_therm_event(struct pmu_gk20a *pmu, +static int pmu_handle_therm_event(struct nvgpu_pmu *pmu, struct nv_pmu_therm_msg *msg) { gk20a_dbg_fn(""); @@ -4318,7 +4318,7 @@ static int pmu_handle_therm_event(struct pmu_gk20a *pmu, return 0; } -static int pmu_handle_event(struct pmu_gk20a *pmu, 
struct pmu_msg *msg) +static int pmu_handle_event(struct nvgpu_pmu *pmu, struct pmu_msg *msg) { int err = 0; struct gk20a *g = gk20a_from_pmu(pmu); @@ -4347,7 +4347,7 @@ static int pmu_handle_event(struct pmu_gk20a *pmu, struct pmu_msg *msg) return err; } -static int pmu_process_message(struct pmu_gk20a *pmu) +static int pmu_process_message(struct nvgpu_pmu *pmu) { struct pmu_msg msg; int status; @@ -4383,7 +4383,7 @@ static int pmu_process_message(struct pmu_gk20a *pmu) return 0; } -int pmu_wait_message_cond(struct pmu_gk20a *pmu, u32 timeout_ms, +int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms, u32 *var, u32 val) { struct gk20a *g = gk20a_from_pmu(pmu); @@ -4411,7 +4411,7 @@ int pmu_wait_message_cond(struct pmu_gk20a *pmu, u32 timeout_ms, return -ETIMEDOUT; } -static void pmu_dump_elpg_stats(struct pmu_gk20a *pmu) +static void pmu_dump_elpg_stats(struct nvgpu_pmu *pmu) { struct gk20a *g = gk20a_from_pmu(pmu); struct pmu_pg_stats stats; @@ -4484,7 +4484,7 @@ static void pmu_dump_elpg_stats(struct pmu_gk20a *pmu) */ } -void pmu_dump_falcon_stats(struct pmu_gk20a *pmu) +void pmu_dump_falcon_stats(struct nvgpu_pmu *pmu) { struct gk20a *g = gk20a_from_pmu(pmu); unsigned int i; @@ -4610,7 +4610,7 @@ void pmu_dump_falcon_stats(struct pmu_gk20a *pmu) void gk20a_pmu_isr(struct gk20a *g) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct pmu_queue *queue; u32 intr, mask; bool recheck = false; @@ -4672,7 +4672,7 @@ void gk20a_pmu_isr(struct gk20a *g) nvgpu_mutex_release(&pmu->isr_mutex); } -static bool pmu_validate_cmd(struct pmu_gk20a *pmu, struct pmu_cmd *cmd, +static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd, struct pmu_msg *msg, struct pmu_payload *payload, u32 queue_id) { @@ -4742,7 +4742,7 @@ invalid_cmd: return false; } -static int pmu_write_cmd(struct pmu_gk20a *pmu, struct pmu_cmd *cmd, +static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd, u32 queue_id, unsigned long timeout_ms) { struct gk20a *g = gk20a_from_pmu(pmu); @@ -4832,7 +4832,7 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd, u32 queue_id, pmu_callback callback, void* cb_param, u32 *seq_desc, unsigned long timeout) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct pmu_v *pv = &g->ops.pmu_ver; struct pmu_sequence *seq; void *in = NULL, *out = NULL; @@ -5022,7 +5022,7 @@ int gk20a_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg) static int gk20a_pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct pmu_cmd cmd; u32 seq, status; @@ -5057,7 +5057,7 @@ static int gk20a_pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id) int gk20a_pmu_enable_elpg(struct gk20a *g) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct gr_gk20a *gr = &g->gr; u32 pg_engine_id; u32 pg_engine_id_list = 0; @@ -5115,7 +5115,7 @@ exit_unlock: int gk20a_pmu_disable_elpg(struct gk20a *g) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct pmu_cmd cmd; u32 seq; int ret = 0; @@ -5225,7 +5225,7 @@ exit_unlock: int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; int err; gk20a_dbg_fn(""); @@ -5240,7 +5240,7 @@ int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable) int gk20a_pmu_destroy(struct gk20a *g) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct pmu_pg_stats_data pg_stat_data = { 0 }; 
struct nvgpu_timeout timeout; int i; @@ -5306,7 +5306,7 @@ int gk20a_pmu_load_norm(struct gk20a *g, u32 *load) int gk20a_pmu_load_update(struct gk20a *g) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; u16 _load = 0; if (!pmu->perfmon_ready) { @@ -5354,7 +5354,7 @@ void gk20a_pmu_reset_load_counters(struct gk20a *g) void gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, struct pmu_pg_stats_data *pg_stat_data) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct pmu_pg_stats stats; pmu_copy_from_dmem(pmu, @@ -5372,7 +5372,7 @@ int gk20a_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id, struct pmu_pg_stats_data *pg_stat_data) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; u32 pg_engine_id_list = 0; if (!pmu->initialized) { @@ -5396,7 +5396,7 @@ int gk20a_pmu_get_pg_stats(struct gk20a *g, int gk20a_pmu_ap_send_command(struct gk20a *g, union pmu_ap_cmd *p_ap_cmd, bool b_block) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; /* FIXME: where is the PG structure defined?? */ u32 status = 0; struct pmu_cmd cmd; diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h index cfcf3947..3941d90f 100644 --- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h @@ -24,14 +24,10 @@ #include #include #include +#include struct nvgpu_firmware; -/* defined by pmu hw spec */ -#define GK20A_PMU_VA_SIZE (512 * 1024 * 1024) -#define GK20A_PMU_UCODE_SIZE_MAX (256 * 1024) -#define GK20A_PMU_SEQ_BUF_SIZE 4096 - #define ZBC_MASK(i) (~(~(0) << ((i)+1)) & 0xfffe) #define APP_VERSION_NC_3 21688026 @@ -56,127 +52,11 @@ struct nvgpu_firmware; #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) #define FUSE_GCPLEX_CONFIG_FUSE_0 0x2C8 #endif -#define PMU_MODE_MISMATCH_STATUS_MAILBOX_R 6 -#define PMU_MODE_MISMATCH_STATUS_VAL 0xDEADDEAD - -enum { - GK20A_PMU_DMAIDX_UCODE = 0, - GK20A_PMU_DMAIDX_VIRT = 1, - GK20A_PMU_DMAIDX_PHYS_VID = 2, - GK20A_PMU_DMAIDX_PHYS_SYS_COH = 3, - GK20A_PMU_DMAIDX_PHYS_SYS_NCOH = 4, - GK20A_PMU_DMAIDX_RSVD = 5, - GK20A_PMU_DMAIDX_PELPG = 6, - GK20A_PMU_DMAIDX_END = 7 -}; - -#define GK20A_PMU_TRACE_BUFSIZE 0x4000 /* 4K */ -#define GK20A_PMU_DMEM_BLKSIZE2 8 - -#define GK20A_PMU_UCODE_NB_MAX_OVERLAY 32 -#define GK20A_PMU_UCODE_NB_MAX_DATE_LENGTH 64 - -struct pmu_ucode_desc { - u32 descriptor_size; - u32 image_size; - u32 tools_version; - u32 app_version; - char date[GK20A_PMU_UCODE_NB_MAX_DATE_LENGTH]; - u32 bootloader_start_offset; - u32 bootloader_size; - u32 bootloader_imem_offset; - u32 bootloader_entry_point; - u32 app_start_offset; - u32 app_size; - u32 app_imem_offset; - u32 app_imem_entry; - u32 app_dmem_offset; - u32 app_resident_code_offset; /* Offset from appStartOffset */ - u32 app_resident_code_size; /* Exact size of the resident code ( potentially contains CRC inside at the end ) */ - u32 app_resident_data_offset; /* Offset from appStartOffset */ - u32 app_resident_data_size; /* Exact size of the resident code ( potentially contains CRC inside at the end ) */ - u32 nb_overlays; - struct {u32 start; u32 size;} load_ovl[GK20A_PMU_UCODE_NB_MAX_OVERLAY]; - u32 compressed; -}; - -struct pmu_ucode_desc_v1 { - u32 descriptor_size; - u32 image_size; - u32 tools_version; - u32 app_version; - char date[GK20A_PMU_UCODE_NB_MAX_DATE_LENGTH]; - u32 bootloader_start_offset; - u32 bootloader_size; - u32 bootloader_imem_offset; - u32 bootloader_entry_point; - u32 app_start_offset; - u32 app_size; - u32 app_imem_offset; - u32 app_imem_entry; 
- u32 app_dmem_offset; - u32 app_resident_code_offset; - u32 app_resident_code_size; - u32 app_resident_data_offset; - u32 app_resident_data_size; - u32 nb_imem_overlays; - u32 nb_dmem_overlays; - struct {u32 start; u32 size; } load_ovl[64]; - u32 compressed; -}; #define PMU_PGENG_GR_BUFFER_IDX_INIT (0) #define PMU_PGENG_GR_BUFFER_IDX_ZBC (1) #define PMU_PGENG_GR_BUFFER_IDX_FECS (2) -struct pmu_gk20a; -struct pmu_queue; - -struct pmu_queue { - - /* used by hw, for BIOS/SMI queue */ - u32 mutex_id; - u32 mutex_lock; - /* used by sw, for LPQ/HPQ queue */ - struct nvgpu_mutex mutex; - - /* current write position */ - u32 position; - /* physical dmem offset where this queue begins */ - u32 offset; - /* logical queue identifier */ - u32 id; - /* physical queue index */ - u32 index; - /* in bytes */ - u32 size; - - /* open-flag */ - u32 oflag; - bool opened; /* opened implies locked */ -}; - -struct pmu_mutex { - u32 id; - u32 index; - u32 ref_cnt; -}; - -#define PMU_MAX_NUM_SEQUENCES (256) -#define PMU_SEQ_BIT_SHIFT (5) -#define PMU_SEQ_TBL_SIZE \ - (PMU_MAX_NUM_SEQUENCES >> PMU_SEQ_BIT_SHIFT) - -#define PMU_INVALID_SEQ_DESC (~0) - -enum -{ - PMU_SEQ_STATE_FREE = 0, - PMU_SEQ_STATE_PENDING, - PMU_SEQ_STATE_USED, - PMU_SEQ_STATE_CANCELLED -}; - struct pmu_payload { struct { void *buf; @@ -192,33 +72,6 @@ struct pmu_surface { struct flcn_mem_desc_v0 params; }; -typedef void (*pmu_callback)(struct gk20a *, struct pmu_msg *, void *, u32, - u32); - -struct pmu_sequence { - u8 id; - u32 state; - u32 desc; - struct pmu_msg *msg; - union { - struct pmu_allocation_v0 in_v0; - struct pmu_allocation_v1 in_v1; - struct pmu_allocation_v2 in_v2; - struct pmu_allocation_v3 in_v3; - }; - struct nvgpu_mem *in_mem; - union { - struct pmu_allocation_v0 out_v0; - struct pmu_allocation_v1 out_v1; - struct pmu_allocation_v2 out_v2; - struct pmu_allocation_v3 out_v3; - }; - struct nvgpu_mem *out_mem; - u8 *out_payload; - pmu_callback callback; - void* cb_params; -}; - /*PG defines used by nvpgu-pmu*/ struct pmu_pg_stats_data { u32 gating_cnt; @@ -263,147 +116,6 @@ struct pmu_pg_stats_data { #define APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT (200) /*PG defines used by nvpgu-pmu*/ -/* Falcon Register index */ -#define PMU_FALCON_REG_R0 (0) -#define PMU_FALCON_REG_R1 (1) -#define PMU_FALCON_REG_R2 (2) -#define PMU_FALCON_REG_R3 (3) -#define PMU_FALCON_REG_R4 (4) -#define PMU_FALCON_REG_R5 (5) -#define PMU_FALCON_REG_R6 (6) -#define PMU_FALCON_REG_R7 (7) -#define PMU_FALCON_REG_R8 (8) -#define PMU_FALCON_REG_R9 (9) -#define PMU_FALCON_REG_R10 (10) -#define PMU_FALCON_REG_R11 (11) -#define PMU_FALCON_REG_R12 (12) -#define PMU_FALCON_REG_R13 (13) -#define PMU_FALCON_REG_R14 (14) -#define PMU_FALCON_REG_R15 (15) -#define PMU_FALCON_REG_IV0 (16) -#define PMU_FALCON_REG_IV1 (17) -#define PMU_FALCON_REG_UNDEFINED (18) -#define PMU_FALCON_REG_EV (19) -#define PMU_FALCON_REG_SP (20) -#define PMU_FALCON_REG_PC (21) -#define PMU_FALCON_REG_IMB (22) -#define PMU_FALCON_REG_DMB (23) -#define PMU_FALCON_REG_CSW (24) -#define PMU_FALCON_REG_CCR (25) -#define PMU_FALCON_REG_SEC (26) -#define PMU_FALCON_REG_CTX (27) -#define PMU_FALCON_REG_EXCI (28) -#define PMU_FALCON_REG_RSVD0 (29) -#define PMU_FALCON_REG_RSVD1 (30) -#define PMU_FALCON_REG_RSVD2 (31) -#define PMU_FALCON_REG_SIZE (32) - -/* Choices for pmu_state */ -#define PMU_STATE_OFF 0 /* PMU is off */ -#define PMU_STATE_STARTING 1 /* PMU is on, but not booted */ -#define PMU_STATE_INIT_RECEIVED 2 /* PMU init message received */ -#define PMU_STATE_ELPG_BOOTING 3 /* PMU is booting */ 
-#define PMU_STATE_ELPG_BOOTED 4 /* ELPG is initialized */ -#define PMU_STATE_LOADING_PG_BUF 5 /* Loading PG buf */ -#define PMU_STATE_LOADING_ZBC 6 /* Loading ZBC buf */ -#define PMU_STATE_STARTED 7 /* Fully unitialized */ -#define PMU_STATE_EXIT 8 /* Exit PMU state machine */ - -struct nvgpu_pg_init { - bool state_change; - struct nvgpu_cond wq; - struct nvgpu_thread state_task; -}; - -struct pmu_gk20a { - - union { - struct pmu_ucode_desc *desc; - struct pmu_ucode_desc_v1 *desc_v1; - }; - struct nvgpu_mem ucode; - - struct nvgpu_mem pg_buf; - /* TBD: remove this if ZBC seq is fixed */ - struct nvgpu_mem seq_buf; - struct nvgpu_mem trace_buf; - struct nvgpu_mem wpr_buf; - bool buf_loaded; - - struct pmu_sha1_gid gid_info; - - struct pmu_queue queue[PMU_QUEUE_COUNT]; - - struct pmu_sequence *seq; - unsigned long pmu_seq_tbl[PMU_SEQ_TBL_SIZE]; - u32 next_seq_desc; - - struct pmu_mutex *mutex; - u32 mutex_cnt; - - struct nvgpu_mutex pmu_copy_lock; - struct nvgpu_mutex pmu_seq_lock; - - struct nvgpu_allocator dmem; - - u32 *ucode_image; - bool pmu_ready; - - u32 zbc_save_done; - - u32 stat_dmem_offset[PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE]; - - u32 elpg_stat; - - u32 mscg_stat; - u32 mscg_transition_state; - - int pmu_state; - -#define PMU_ELPG_ENABLE_ALLOW_DELAY_MSEC 1 /* msec */ - struct nvgpu_pg_init pg_init; - struct nvgpu_mutex pg_mutex; /* protect pg-RPPG/MSCG enable/disable */ - struct nvgpu_mutex elpg_mutex; /* protect elpg enable/disable */ - int elpg_refcnt; /* disable -1, enable +1, <=0 elpg disabled, > 0 elpg enabled */ - - union { - struct pmu_perfmon_counter_v2 perfmon_counter_v2; - struct pmu_perfmon_counter_v0 perfmon_counter_v0; - }; - u32 perfmon_state_id[PMU_DOMAIN_GROUP_NUM]; - - bool initialized; - - void (*remove_support)(struct pmu_gk20a *pmu); - bool sw_ready; - bool perfmon_ready; - - u32 sample_buffer; - u32 load_shadow; - u32 load_avg; - - struct nvgpu_mutex isr_mutex; - bool isr_enabled; - - bool zbc_ready; - union { - struct pmu_cmdline_args_v0 args_v0; - struct pmu_cmdline_args_v1 args_v1; - struct pmu_cmdline_args_v2 args_v2; - struct pmu_cmdline_args_v3 args_v3; - struct pmu_cmdline_args_v4 args_v4; - struct pmu_cmdline_args_v5 args_v5; - }; - unsigned long perfmon_events_cnt; - bool perfmon_sampling_enabled; - u8 pmu_mode; /*Added for GM20b, and ACR*/ - u32 falcon_id; - u32 aelpg_param[5]; - u32 override_done; - - struct nvgpu_firmware *fw; -}; - int gk20a_init_pmu_support(struct gk20a *g); int gk20a_init_pmu_bind_fecs(struct gk20a *g); @@ -426,8 +138,8 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries); int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable); -int pmu_mutex_acquire(struct pmu_gk20a *pmu, u32 id, u32 *token); -int pmu_mutex_release(struct pmu_gk20a *pmu, u32 id, u32 *token); +int pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token); +int pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token); int gk20a_pmu_destroy(struct gk20a *g); int gk20a_pmu_load_norm(struct gk20a *g, u32 *load); int gk20a_pmu_load_update(struct gk20a *g); @@ -436,33 +148,33 @@ void gk20a_pmu_get_load_counters(struct gk20a *g, u32 *busy_cycles, u32 *total_cycles); void gk20a_init_pmu_ops(struct gpu_ops *gops); -void pmu_copy_to_dmem(struct pmu_gk20a *pmu, +void pmu_copy_to_dmem(struct nvgpu_pmu *pmu, u32 dst, u8 *src, u32 size, u8 port); -void pmu_copy_from_dmem(struct pmu_gk20a *pmu, +void pmu_copy_from_dmem(struct nvgpu_pmu *pmu, u32 src, u8 *dst, u32 size, u8 port); -int pmu_reset(struct pmu_gk20a *pmu); -int pmu_bootstrap(struct pmu_gk20a *pmu); 
-int gk20a_init_pmu(struct pmu_gk20a *pmu); -void pmu_dump_falcon_stats(struct pmu_gk20a *pmu); -void gk20a_remove_pmu_support(struct pmu_gk20a *pmu); -void pmu_seq_init(struct pmu_gk20a *pmu); +int pmu_reset(struct nvgpu_pmu *pmu); +int pmu_bootstrap(struct nvgpu_pmu *pmu); +int gk20a_init_pmu(struct nvgpu_pmu *pmu); +void pmu_dump_falcon_stats(struct nvgpu_pmu *pmu); +void gk20a_remove_pmu_support(struct nvgpu_pmu *pmu); +void pmu_seq_init(struct nvgpu_pmu *pmu); -int gk20a_init_pmu(struct pmu_gk20a *pmu); +int gk20a_init_pmu(struct nvgpu_pmu *pmu); int gk20a_pmu_ap_send_command(struct gk20a *g, union pmu_ap_cmd *p_ap_cmd, bool b_block); int gk20a_aelpg_init(struct gk20a *g); int gk20a_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id); -void pmu_enable_irq(struct pmu_gk20a *pmu, bool enable); -int pmu_wait_message_cond(struct pmu_gk20a *pmu, u32 timeout_ms, +void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable); +int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms, u32 *var, u32 val); void pmu_handle_fecs_boot_acr_msg(struct gk20a *g, struct pmu_msg *msg, void *param, u32 handle, u32 status); void gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, struct pmu_pg_stats_data *pg_stat_data); int gk20a_pmu_reset(struct gk20a *g); -int pmu_idle(struct pmu_gk20a *pmu); -int pmu_enable_hw(struct pmu_gk20a *pmu, bool enable); +int pmu_idle(struct nvgpu_pmu *pmu); +int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable); void gk20a_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem); void gk20a_pmu_surface_describe(struct gk20a *g, struct nvgpu_mem *mem, @@ -475,7 +187,7 @@ int gk20a_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id, struct pmu_pg_stats_data *pg_stat_data); bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos); -int nvgpu_pmu_perfmon_start_sampling(struct pmu_gk20a *pmu); -int nvgpu_pmu_perfmon_stop_sampling(struct pmu_gk20a *pmu); +int nvgpu_pmu_perfmon_start_sampling(struct nvgpu_pmu *pmu); +int nvgpu_pmu_perfmon_stop_sampling(struct nvgpu_pmu *pmu); #endif /*__PMU_GK20A_H__*/ diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c index 815ae638..666f629e 100644 --- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c @@ -26,6 +26,7 @@ #include #include #include +#include #include @@ -131,7 +132,7 @@ void gm20b_init_secure_pmu(struct gpu_ops *gops) static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img) { struct nvgpu_firmware *pmu_fw, *pmu_desc, *pmu_sig; - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct lsf_ucode_desc *lsf_desc; int err; gm20b_dbg_pmu("requesting PMU ucode in GM20B\n"); @@ -382,7 +383,7 @@ int prepare_ucode_blob(struct gk20a *g) int err; struct ls_flcn_mgr lsfm_l, *plsfm; - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; phys_addr_t wpr_addr, wpr_page; u32 wprsize; int i; @@ -470,7 +471,7 @@ static u8 lsfm_falcon_disabled(struct gk20a *g, struct ls_flcn_mgr *plsfm, static int lsfm_discover_ucode_images(struct gk20a *g, struct ls_flcn_mgr *plsfm) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct flcn_ucode_img ucode_img; u32 falcon_id; u32 i; @@ -555,7 +556,7 @@ static int gm20b_pmu_populate_loader_cfg(struct gk20a *g, void *lsfm, u32 *p_bl_gen_desc_size) { struct wpr_carveout_info wpr_inf; - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct lsfm_managed_ucode_img *p_lsfm = (struct lsfm_managed_ucode_img *)lsfm; struct flcn_ucode_img 
*p_img = &(p_lsfm->ucode_img); @@ -685,7 +686,7 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g, struct lsfm_managed_ucode_img *pnode) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; if (pnode->wpr_header.falcon_id != pmu->falcon_id) { gm20b_dbg_pmu("non pmu. write flcn bl gen desc\n"); g->ops.pmu.flcn_populate_bl_dmem_desc(g, @@ -842,7 +843,7 @@ static void lsfm_fill_static_lsb_hdr_info(struct gk20a *g, u32 falcon_id, struct lsfm_managed_ucode_img *pnode) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; u32 full_app_size = 0; u32 data = 0; @@ -1214,7 +1215,7 @@ int acr_ucode_patch_sig(struct gk20a *g, return 0; } -static int bl_bootstrap(struct pmu_gk20a *pmu, +static int bl_bootstrap(struct nvgpu_pmu *pmu, struct flcn_bl_dmem_desc *pbl_desc, u32 bl_sz) { struct gk20a *g = gk20a_from_pmu(pmu); @@ -1284,7 +1285,7 @@ static int bl_bootstrap(struct pmu_gk20a *pmu, int gm20b_init_nspmu_setup_hw1(struct gk20a *g) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; int err = 0; gk20a_dbg_fn(""); @@ -1319,7 +1320,7 @@ static int gm20b_init_pmu_setup_hw1(struct gk20a *g, void *desc, u32 bl_sz) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; int err; gk20a_dbg_fn(""); diff --git a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c index 109b3ca5..34c2c373 100644 --- a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c @@ -15,14 +15,15 @@ #include +#include +#include + #include "gk20a/gk20a.h" #include "gk20a/pmu_gk20a.h" #include "acr_gm20b.h" #include "pmu_gm20b.h" -#include - #include #include #include @@ -138,7 +139,7 @@ static void pmu_handle_acr_init_wpr_msg(struct gk20a *g, struct pmu_msg *msg, int gm20b_pmu_init_acr(struct gk20a *g) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct pmu_cmd cmd; u32 seq; @@ -198,7 +199,7 @@ static int pmu_gm20b_ctx_wait_lsf_ready(struct gk20a *g, u32 timeout_ms, void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct pmu_cmd cmd; u32 seq; diff --git a/drivers/gpu/nvgpu/gp106/acr_gp106.c b/drivers/gpu/nvgpu/gp106/acr_gp106.c index 5570489e..c7d71ab0 100644 --- a/drivers/gpu/nvgpu/gp106/acr_gp106.c +++ b/drivers/gpu/nvgpu/gp106/acr_gp106.c @@ -20,6 +20,7 @@ #include #include #include +#include #include "gk20a/gk20a.h" #include "gk20a/pmu_gk20a.h" @@ -140,7 +141,7 @@ void gp106_init_secure_pmu(struct gpu_ops *gops) static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img) { struct nvgpu_firmware *pmu_fw, *pmu_desc, *pmu_sig; - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct lsf_ucode_desc_v1 *lsf_desc; int err; @@ -382,7 +383,7 @@ static int gp106_prepare_ucode_blob(struct gk20a *g) int err; struct ls_flcn_mgr_v1 lsfm_l, *plsfm; - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct wpr_carveout_info wpr_inf; if (g->acr.ucode_blob.cpu_va) { @@ -445,7 +446,7 @@ static u8 lsfm_falcon_disabled(struct gk20a *g, struct ls_flcn_mgr_v1 *plsfm, static int lsfm_discover_ucode_images(struct gk20a *g, struct ls_flcn_mgr_v1 *plsfm) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct flcn_ucode_img_v1 ucode_img; u32 falcon_id; u32 i; @@ -531,7 +532,7 @@ static int gp106_pmu_populate_loader_cfg(struct gk20a *g, void *lsfm, u32 *p_bl_gen_desc_size) { struct wpr_carveout_info wpr_inf; - struct pmu_gk20a *pmu = 
&g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct lsfm_managed_ucode_img_v2 *p_lsfm = (struct lsfm_managed_ucode_img_v2 *)lsfm; struct flcn_ucode_img_v1 *p_img = &(p_lsfm->ucode_img); @@ -661,7 +662,7 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g, struct lsfm_managed_ucode_img_v2 *pnode) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; if (pnode->wpr_header.falcon_id != pmu->falcon_id) { gp106_dbg_pmu("non pmu. write flcn bl gen desc\n"); g->ops.pmu.flcn_populate_bl_dmem_desc(g, @@ -818,7 +819,7 @@ static void lsfm_fill_static_lsb_hdr_info(struct gk20a *g, u32 falcon_id, struct lsfm_managed_ucode_img_v2 *pnode) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; u32 full_app_size = 0; u32 data = 0; diff --git a/drivers/gpu/nvgpu/gp106/pmu_gp106.c b/drivers/gpu/nvgpu/gp106/pmu_gp106.c index 467c50be..89144c2a 100644 --- a/drivers/gpu/nvgpu/gp106/pmu_gp106.c +++ b/drivers/gpu/nvgpu/gp106/pmu_gp106.c @@ -11,6 +11,8 @@ * more details. */ +#include + #include "gk20a/gk20a.h" #include "gk20a/pmu_gk20a.h" @@ -31,7 +33,7 @@ #define PMU_MEM_SCRUBBING_TIMEOUT_MAX 1000 #define PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT 10 -static int gp106_pmu_enable_hw(struct pmu_gk20a *pmu, bool enable) +static int gp106_pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable) { struct gk20a *g = gk20a_from_pmu(pmu); @@ -86,7 +88,7 @@ static int gp106_pmu_enable_hw(struct pmu_gk20a *pmu, bool enable) } } -static int pmu_enable(struct pmu_gk20a *pmu, bool enable) +static int pmu_enable(struct nvgpu_pmu *pmu, bool enable) { struct gk20a *g = gk20a_from_pmu(pmu); u32 reg_reset; @@ -121,7 +123,7 @@ static int pmu_enable(struct pmu_gk20a *pmu, bool enable) int gp106_pmu_reset(struct gk20a *g) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; int err = 0; gk20a_dbg_fn(""); @@ -210,7 +212,7 @@ static void pmu_handle_param_msg(struct gk20a *g, struct pmu_msg *msg, static int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct pmu_cmd cmd; u32 seq; u32 status; @@ -262,7 +264,7 @@ static int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id) void gp106_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, struct pmu_pg_stats_data *pg_stat_data) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct pmu_pg_stats_v2 stats; pmu_copy_from_dmem(pmu, @@ -335,7 +337,7 @@ static bool gp106_is_priv_load(u32 falcon_id) static void gp106_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask, u32 flags) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct pmu_cmd cmd; u32 seq; diff --git a/drivers/gpu/nvgpu/gp106/sec2_gp106.c b/drivers/gpu/nvgpu/gp106/sec2_gp106.c index 078a1436..9c86c5b5 100644 --- a/drivers/gpu/nvgpu/gp106/sec2_gp106.c +++ b/drivers/gpu/nvgpu/gp106/sec2_gp106.c @@ -11,6 +11,8 @@ * more details. 
*/ +#include + #include "gk20a/gk20a.h" #include "gk20a/pmu_gk20a.h" @@ -88,7 +90,7 @@ int sec2_wait_for_halt(struct gk20a *g, unsigned int timeout) return completion; } -void sec2_copy_to_dmem(struct pmu_gk20a *pmu, +void sec2_copy_to_dmem(struct nvgpu_pmu *pmu, u32 dst, u8 *src, u32 size, u8 port) { struct gk20a *g = gk20a_from_pmu(pmu); @@ -139,7 +141,7 @@ void sec2_copy_to_dmem(struct pmu_gk20a *pmu, return; } -int bl_bootstrap_sec2(struct pmu_gk20a *pmu, +int bl_bootstrap_sec2(struct nvgpu_pmu *pmu, void *desc, u32 bl_sz) { struct gk20a *g = gk20a_from_pmu(pmu); @@ -223,7 +225,7 @@ int bl_bootstrap_sec2(struct pmu_gk20a *pmu, return 0; } -void sec_enable_irq(struct pmu_gk20a *pmu, bool enable) +void sec_enable_irq(struct nvgpu_pmu *pmu, bool enable) { struct gk20a *g = gk20a_from_pmu(pmu); @@ -281,7 +283,7 @@ void sec_enable_irq(struct pmu_gk20a *pmu, bool enable) void init_pmu_setup_hw1(struct gk20a *g) { struct mm_gk20a *mm = &g->mm; - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; /* PMU TRANSCFG */ /* setup apertures - virtual */ @@ -331,7 +333,7 @@ void init_pmu_setup_hw1(struct gk20a *g) int init_sec2_setup_hw1(struct gk20a *g, void *desc, u32 bl_sz) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; int err; u32 data = 0; diff --git a/drivers/gpu/nvgpu/gp106/sec2_gp106.h b/drivers/gpu/nvgpu/gp106/sec2_gp106.h index 336bb0f0..e3da0abf 100644 --- a/drivers/gpu/nvgpu/gp106/sec2_gp106.h +++ b/drivers/gpu/nvgpu/gp106/sec2_gp106.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -16,12 +16,12 @@ int sec2_clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout); int sec2_wait_for_halt(struct gk20a *g, unsigned int timeout); -void sec2_copy_to_dmem(struct pmu_gk20a *pmu, +void sec2_copy_to_dmem(struct nvgpu_pmu *pmu, u32 dst, u8 *src, u32 size, u8 port); -void sec2_dump_falcon_stats(struct pmu_gk20a *pmu); -int bl_bootstrap_sec2(struct pmu_gk20a *pmu, +void sec2_dump_falcon_stats(struct nvgpu_pmu *pmu); +int bl_bootstrap_sec2(struct nvgpu_pmu *pmu, void *desc, u32 bl_sz); -void sec_enable_irq(struct pmu_gk20a *pmu, bool enable); +void sec_enable_irq(struct nvgpu_pmu *pmu, bool enable); void init_pmu_setup_hw1(struct gk20a *g); int init_sec2_setup_hw1(struct gk20a *g, void *desc, u32 bl_sz); diff --git a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c index 1167006d..1409d279 100644 --- a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c @@ -15,6 +15,9 @@ #include +#include +#include + #include "gk20a/gk20a.h" #include "gk20a/pmu_gk20a.h" #include "gm20b/acr_gm20b.h" @@ -23,8 +26,6 @@ #include "pmu_gp10b.h" #include "gp10b_sysfs.h" -#include - #include #include @@ -142,7 +143,7 @@ static struct pg_init_sequence_list _pginitseq_gp10b[] = { static void gp10b_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask, u32 flags) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct pmu_cmd cmd; u32 seq; @@ -226,7 +227,7 @@ static void pmu_handle_gr_param_msg(struct gk20a *g, struct pmu_msg *msg, int gp10b_pg_gr_init(struct gk20a *g, u32 pg_engine_id) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct pmu_cmd cmd; u32 seq; @@ -255,7 +256,7 @@ int gp10b_pg_gr_init(struct gk20a *g, u32 
pg_engine_id) static void gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, struct pmu_pg_stats_data *pg_stat_data) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; struct pmu_pg_stats_v1 stats; pmu_copy_from_dmem(pmu, @@ -301,7 +302,7 @@ void gp10b_write_dmatrfbase(struct gk20a *g, u32 addr) static int gp10b_init_pmu_setup_hw1(struct gk20a *g) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; int err; gk20a_dbg_fn(""); diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu.h b/drivers/gpu/nvgpu/include/nvgpu/pmu.h new file mode 100644 index 00000000..0fcc5710 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/pmu.h @@ -0,0 +1,328 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __NVGPU_PMU_H__ +#define __NVGPU_PMU_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define nvgpu_pmu_dbg(g, fmt, args...) \ + nvgpu_log(g, gpu_dbg_pmu, fmt, ##args) + +/* defined by pmu hw spec */ +#define GK20A_PMU_VA_SIZE (512 * 1024 * 1024) +#define GK20A_PMU_UCODE_SIZE_MAX (256 * 1024) +#define GK20A_PMU_SEQ_BUF_SIZE 4096 + +#define GK20A_PMU_TRACE_BUFSIZE 0x4000 /* 4K */ +#define GK20A_PMU_DMEM_BLKSIZE2 8 + +#define PMU_MODE_MISMATCH_STATUS_MAILBOX_R 6 +#define PMU_MODE_MISMATCH_STATUS_VAL 0xDEADDEAD + +/* Falcon Register index */ +#define PMU_FALCON_REG_R0 (0) +#define PMU_FALCON_REG_R1 (1) +#define PMU_FALCON_REG_R2 (2) +#define PMU_FALCON_REG_R3 (3) +#define PMU_FALCON_REG_R4 (4) +#define PMU_FALCON_REG_R5 (5) +#define PMU_FALCON_REG_R6 (6) +#define PMU_FALCON_REG_R7 (7) +#define PMU_FALCON_REG_R8 (8) +#define PMU_FALCON_REG_R9 (9) +#define PMU_FALCON_REG_R10 (10) +#define PMU_FALCON_REG_R11 (11) +#define PMU_FALCON_REG_R12 (12) +#define PMU_FALCON_REG_R13 (13) +#define PMU_FALCON_REG_R14 (14) +#define PMU_FALCON_REG_R15 (15) +#define PMU_FALCON_REG_IV0 (16) +#define PMU_FALCON_REG_IV1 (17) +#define PMU_FALCON_REG_UNDEFINED (18) +#define PMU_FALCON_REG_EV (19) +#define PMU_FALCON_REG_SP (20) +#define PMU_FALCON_REG_PC (21) +#define PMU_FALCON_REG_IMB (22) +#define PMU_FALCON_REG_DMB (23) +#define PMU_FALCON_REG_CSW (24) +#define PMU_FALCON_REG_CCR (25) +#define PMU_FALCON_REG_SEC (26) +#define PMU_FALCON_REG_CTX (27) +#define PMU_FALCON_REG_EXCI (28) +#define PMU_FALCON_REG_RSVD0 (29) +#define PMU_FALCON_REG_RSVD1 (30) +#define PMU_FALCON_REG_RSVD2 (31) +#define PMU_FALCON_REG_SIZE (32) + +/* Choices for pmu_state */ +#define PMU_STATE_OFF 0 /* PMU is off */ +#define PMU_STATE_STARTING 1 /* PMU is on, but not booted */ +#define PMU_STATE_INIT_RECEIVED 2 /* PMU init message received */ +#define PMU_STATE_ELPG_BOOTING 3 /* PMU is booting */ +#define PMU_STATE_ELPG_BOOTED 4 /* ELPG is initialized */ +#define PMU_STATE_LOADING_PG_BUF 5 /* Loading PG buf */ +#define PMU_STATE_LOADING_ZBC 6 /* Loading ZBC buf */ +#define PMU_STATE_STARTED 7 /* Fully unitialized */ +#define PMU_STATE_EXIT 8 /* Exit PMU state machine */ + +#define GK20A_PMU_UCODE_NB_MAX_OVERLAY 32 +#define GK20A_PMU_UCODE_NB_MAX_DATE_LENGTH 64 + +#define 
PMU_MAX_NUM_SEQUENCES (256) +#define PMU_SEQ_BIT_SHIFT (5) +#define PMU_SEQ_TBL_SIZE \ + (PMU_MAX_NUM_SEQUENCES >> PMU_SEQ_BIT_SHIFT) + +#define PMU_INVALID_SEQ_DESC (~0) + +enum { + GK20A_PMU_DMAIDX_UCODE = 0, + GK20A_PMU_DMAIDX_VIRT = 1, + GK20A_PMU_DMAIDX_PHYS_VID = 2, + GK20A_PMU_DMAIDX_PHYS_SYS_COH = 3, + GK20A_PMU_DMAIDX_PHYS_SYS_NCOH = 4, + GK20A_PMU_DMAIDX_RSVD = 5, + GK20A_PMU_DMAIDX_PELPG = 6, + GK20A_PMU_DMAIDX_END = 7 +}; + +enum { + PMU_SEQ_STATE_FREE = 0, + PMU_SEQ_STATE_PENDING, + PMU_SEQ_STATE_USED, + PMU_SEQ_STATE_CANCELLED +}; + +typedef void (*pmu_callback)(struct gk20a *, struct pmu_msg *, void *, u32, + u32); + +struct pmu_ucode_desc { + u32 descriptor_size; + u32 image_size; + u32 tools_version; + u32 app_version; + char date[GK20A_PMU_UCODE_NB_MAX_DATE_LENGTH]; + u32 bootloader_start_offset; + u32 bootloader_size; + u32 bootloader_imem_offset; + u32 bootloader_entry_point; + u32 app_start_offset; + u32 app_size; + u32 app_imem_offset; + u32 app_imem_entry; + u32 app_dmem_offset; + /* Offset from appStartOffset */ + u32 app_resident_code_offset; + /* Exact size of the resident code + * ( potentially contains CRC inside at the end ) + */ + u32 app_resident_code_size; + /* Offset from appStartOffset */ + u32 app_resident_data_offset; + /* Exact size of the resident code + * ( potentially contains CRC inside at the end ) + */ + u32 app_resident_data_size; + u32 nb_overlays; + struct {u32 start; u32 size; } load_ovl[GK20A_PMU_UCODE_NB_MAX_OVERLAY]; + u32 compressed; +}; + +struct pmu_ucode_desc_v1 { + u32 descriptor_size; + u32 image_size; + u32 tools_version; + u32 app_version; + char date[GK20A_PMU_UCODE_NB_MAX_DATE_LENGTH]; + u32 bootloader_start_offset; + u32 bootloader_size; + u32 bootloader_imem_offset; + u32 bootloader_entry_point; + u32 app_start_offset; + u32 app_size; + u32 app_imem_offset; + u32 app_imem_entry; + u32 app_dmem_offset; + u32 app_resident_code_offset; + u32 app_resident_code_size; + u32 app_resident_data_offset; + u32 app_resident_data_size; + u32 nb_imem_overlays; + u32 nb_dmem_overlays; + struct {u32 start; u32 size; } load_ovl[64]; + u32 compressed; +}; + +struct pmu_queue { + + /* used by hw, for BIOS/SMI queue */ + u32 mutex_id; + u32 mutex_lock; + /* used by sw, for LPQ/HPQ queue */ + struct nvgpu_mutex mutex; + + /* current write position */ + u32 position; + /* physical dmem offset where this queue begins */ + u32 offset; + /* logical queue identifier */ + u32 id; + /* physical queue index */ + u32 index; + /* in bytes */ + u32 size; + + /* open-flag */ + u32 oflag; + bool opened; /* opened implies locked */ +}; + +struct pmu_mutex { + u32 id; + u32 index; + u32 ref_cnt; +}; + +struct pmu_sequence { + u8 id; + u32 state; + u32 desc; + struct pmu_msg *msg; + union { + struct pmu_allocation_v0 in_v0; + struct pmu_allocation_v1 in_v1; + struct pmu_allocation_v2 in_v2; + struct pmu_allocation_v3 in_v3; + }; + struct nvgpu_mem *in_mem; + union { + struct pmu_allocation_v0 out_v0; + struct pmu_allocation_v1 out_v1; + struct pmu_allocation_v2 out_v2; + struct pmu_allocation_v3 out_v3; + }; + struct nvgpu_mem *out_mem; + u8 *out_payload; + pmu_callback callback; + void *cb_params; +}; + +struct nvgpu_pg_init { + bool state_change; + struct nvgpu_cond wq; + struct nvgpu_thread state_task; +}; + +struct nvgpu_pmu { + struct gk20a *g; + struct nvgpu_falcon *flcn; + + union { + struct pmu_ucode_desc *desc; + struct pmu_ucode_desc_v1 *desc_v1; + }; + struct nvgpu_mem ucode; + + struct nvgpu_mem pg_buf; + + /* TBD: remove this if ZBC seq is fixed */ + 
struct nvgpu_mem seq_buf; + struct nvgpu_mem trace_buf; + struct nvgpu_mem wpr_buf; + bool buf_loaded; + + struct pmu_sha1_gid gid_info; + + struct pmu_queue queue[PMU_QUEUE_COUNT]; + + struct pmu_sequence *seq; + unsigned long pmu_seq_tbl[PMU_SEQ_TBL_SIZE]; + u32 next_seq_desc; + + struct pmu_mutex *mutex; + u32 mutex_cnt; + + struct nvgpu_mutex pmu_copy_lock; + struct nvgpu_mutex pmu_seq_lock; + + struct nvgpu_allocator dmem; + + u32 *ucode_image; + bool pmu_ready; + + u32 zbc_save_done; + + u32 stat_dmem_offset[PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE]; + + u32 elpg_stat; + + u32 mscg_stat; + u32 mscg_transition_state; + + int pmu_state; + +#define PMU_ELPG_ENABLE_ALLOW_DELAY_MSEC 1 /* msec */ + struct nvgpu_pg_init pg_init; + struct nvgpu_mutex pg_mutex; /* protect pg-RPPG/MSCG enable/disable */ + struct nvgpu_mutex elpg_mutex; /* protect elpg enable/disable */ + /* disable -1, enable +1, <=0 elpg disabled, > 0 elpg enabled */ + int elpg_refcnt; + + union { + struct pmu_perfmon_counter_v2 perfmon_counter_v2; + struct pmu_perfmon_counter_v0 perfmon_counter_v0; + }; + u32 perfmon_state_id[PMU_DOMAIN_GROUP_NUM]; + + bool initialized; + + void (*remove_support)(struct nvgpu_pmu *pmu); + bool sw_ready; + bool perfmon_ready; + + u32 sample_buffer; + u32 load_shadow; + u32 load_avg; + + struct nvgpu_mutex isr_mutex; + bool isr_enabled; + + bool zbc_ready; + union { + struct pmu_cmdline_args_v0 args_v0; + struct pmu_cmdline_args_v1 args_v1; + struct pmu_cmdline_args_v2 args_v2; + struct pmu_cmdline_args_v3 args_v3; + struct pmu_cmdline_args_v4 args_v4; + struct pmu_cmdline_args_v5 args_v5; + }; + unsigned long perfmon_events_cnt; + bool perfmon_sampling_enabled; + u8 pmu_mode; /*Added for GM20b, and ACR*/ + u32 falcon_id; + u32 aelpg_param[5]; + u32 override_done; + + struct nvgpu_firmware *fw; +}; + +#endif /* __NVGPU_PMU_H__ */ diff --git a/drivers/gpu/nvgpu/lpwr/lpwr.c b/drivers/gpu/nvgpu/lpwr/lpwr.c index 099c81d6..85acfd67 100644 --- a/drivers/gpu/nvgpu/lpwr/lpwr.c +++ b/drivers/gpu/nvgpu/lpwr/lpwr.c @@ -12,10 +12,9 @@ */ #include +#include #include "gk20a/gk20a.h" -#include "gk20a/pmu_gk20a.h" -#include "gp106/pmu_gp106.h" #include "gm206/bios_gm206.h" #include "pstate/pstate.h" #include "perf/perf.h" @@ -207,7 +206,7 @@ static void nvgpu_pmu_handle_param_lpwr_msg(struct gk20a *g, *ack_status = 1; - gp106_dbg_pmu("lpwr-param is acknowledged from PMU %x", + nvgpu_pmu_dbg(g, "lpwr-param is acknowledged from PMU %x", msg->msg.pg.msg_type); } @@ -243,7 +242,7 @@ int nvgpu_lwpr_mclk_change(struct gk20a *g, u32 pstate) PMU_PG_PARAM_CMD_MCLK_CHANGE; cmd.cmd.pg.mclk_change.data = payload; - gp106_dbg_pmu("cmd post MS PMU_PG_PARAM_CMD_MCLK_CHANGE"); + nvgpu_pmu_dbg(g, "cmd post MS PMU_PG_PARAM_CMD_MCLK_CHANGE"); status = gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, nvgpu_pmu_handle_param_lpwr_msg, &ack_status, &seq, ~0); @@ -276,7 +275,7 @@ u32 nvgpu_lpwr_post_init(struct gk20a *g) cmd.cmd.pg.post_init.cmd_id = PMU_PG_PARAM_CMD_POST_INIT; - gp106_dbg_pmu("cmd post post-init PMU_PG_PARAM_CMD_POST_INIT"); + nvgpu_pmu_dbg(g, "cmd post post-init PMU_PG_PARAM_CMD_POST_INIT"); status = gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ, nvgpu_pmu_handle_param_lpwr_msg, &ack_status, &seq, ~0); @@ -336,7 +335,7 @@ u32 nvgpu_lpwr_is_rppg_supported(struct gk20a *g, u32 pstate_num) int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; u32 status = 0; u32 is_mscg_supported = 0; u32 is_rppg_supported = 0; @@ 
-376,7 +375,7 @@ int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock) int nvgpu_lpwr_disable_pg(struct gk20a *g, bool pstate_lock) { - struct pmu_gk20a *pmu = &g->pmu; + struct nvgpu_pmu *pmu = &g->pmu; int status = 0; u32 is_mscg_supported = 0; u32 is_rppg_supported = 0; diff --git a/drivers/gpu/nvgpu/lpwr/rppg.c b/drivers/gpu/nvgpu/lpwr/rppg.c index 59948f35..553457f7 100644 --- a/drivers/gpu/nvgpu/lpwr/rppg.c +++ b/drivers/gpu/nvgpu/lpwr/rppg.c @@ -11,9 +11,9 @@ * more details. */ +#include + #include "gk20a/gk20a.h" -#include "gk20a/pmu_gk20a.h" -#include "gp106/pmu_gp106.h" #include "gm206/bios_gm206.h" #include "pstate/pstate.h" @@ -29,13 +29,13 @@ static void pmu_handle_rppg_init_msg(struct gk20a *g, struct pmu_msg *msg, case NV_PMU_RPPG_MSG_ID_INIT_CTRL_ACK: ctrlId = msg->msg.pg.rppg_msg.init_ctrl_ack.ctrl_id; *success = 1; - gp106_dbg_pmu("RPPG is acknowledged from PMU %x", + nvgpu_pmu_dbg(g, "RPPG is acknowledged from PMU %x", msg->msg.pg.msg_type); break; } } - gp106_dbg_pmu("RPPG is acknowledged from PMU %x", + nvgpu_pmu_dbg(g, "RPPG is acknowledged from PMU %x", msg->msg.pg.msg_type); } diff --git a/drivers/gpu/nvgpu/perf/perf.c b/drivers/gpu/nvgpu/perf/perf.c index 9adcadb6..f07a1ffd 100644 --- a/drivers/gpu/nvgpu/perf/perf.c +++ b/drivers/gpu/nvgpu/perf/perf.c @@ -11,13 +11,13 @@ * more details. */ +#include +#include + #include "gk20a/gk20a.h" #include "perf.h" -#include "gk20a/pmu_gk20a.h" #include "clk/clk_arb.h" -#include - struct perfrpc_pmucmdhandler_params { struct nv_pmu_perf_rpc *prpccall; u32 success; diff --git a/drivers/gpu/nvgpu/perf/vfe_equ.c b/drivers/gpu/nvgpu/perf/vfe_equ.c index bb443265..bb9939f9 100644 --- a/drivers/gpu/nvgpu/perf/vfe_equ.c +++ b/drivers/gpu/nvgpu/perf/vfe_equ.c @@ -20,7 +20,6 @@ #include "boardobj/boardobjgrp_e255.h" #include "ctrl/ctrlclk.h" #include "ctrl/ctrlvolt.h" -#include "gk20a/pmu_gk20a.h" static struct vfe_equ *construct_vfe_equ(struct gk20a *g, void *pargs); static u32 devinit_get_vfe_equ_table(struct gk20a *g, diff --git a/drivers/gpu/nvgpu/perf/vfe_var.c b/drivers/gpu/nvgpu/perf/vfe_var.c index c1f87c25..f3f8ee02 100644 --- a/drivers/gpu/nvgpu/perf/vfe_var.c +++ b/drivers/gpu/nvgpu/perf/vfe_var.c @@ -20,7 +20,6 @@ #include "boardobj/boardobjgrp_e32.h" #include "ctrl/ctrlclk.h" #include "ctrl/ctrlvolt.h" -#include "gk20a/pmu_gk20a.h" static u32 devinit_get_vfe_var_table(struct gk20a *g, struct vfe_vars *pvarobjs); diff --git a/drivers/gpu/nvgpu/pmgr/pmgrpmu.c b/drivers/gpu/nvgpu/pmgr/pmgrpmu.c index 7445d81c..7abe4976 100644 --- a/drivers/gpu/nvgpu/pmgr/pmgrpmu.c +++ b/drivers/gpu/nvgpu/pmgr/pmgrpmu.c @@ -13,11 +13,11 @@ #include #include +#include #include "gk20a/gk20a.h" #include "gk20a/platform_gk20a.h" #include "gm206/bios_gm206.h" -#include "gk20a/pmu_gk20a.h" #include "boardobj/boardobjgrp.h" #include "boardobj/boardobjgrp_e32.h" diff --git a/drivers/gpu/nvgpu/pmgr/pwrdev.c b/drivers/gpu/nvgpu/pmgr/pwrdev.c index 3823c4e9..0dbbd2c2 100644 --- a/drivers/gpu/nvgpu/pmgr/pwrdev.c +++ b/drivers/gpu/nvgpu/pmgr/pwrdev.c @@ -18,7 +18,6 @@ #include "boardobj/boardobjgrp.h" #include "boardobj/boardobjgrp_e32.h" #include "gm206/bios_gm206.h" -#include "gk20a/pmu_gk20a.h" static u32 _pwr_device_pmudata_instget(struct gk20a *g, struct nv_pmu_boardobjgrp *pmuboardobjgrp, diff --git a/drivers/gpu/nvgpu/pmgr/pwrmonitor.c b/drivers/gpu/nvgpu/pmgr/pwrmonitor.c index 05636001..330d23c8 100644 --- a/drivers/gpu/nvgpu/pmgr/pwrmonitor.c +++ b/drivers/gpu/nvgpu/pmgr/pwrmonitor.c @@ -18,7 +18,6 @@ #include "boardobj/boardobjgrp.h" 
#include "boardobj/boardobjgrp_e32.h" #include "gm206/bios_gm206.h" -#include "gk20a/pmu_gk20a.h" static u32 _pwr_channel_pmudata_instget(struct gk20a *g, struct nv_pmu_boardobjgrp *pmuboardobjgrp, diff --git a/drivers/gpu/nvgpu/pmgr/pwrpolicy.c b/drivers/gpu/nvgpu/pmgr/pwrpolicy.c index c7384b6e..74dc7a6a 100644 --- a/drivers/gpu/nvgpu/pmgr/pwrpolicy.c +++ b/drivers/gpu/nvgpu/pmgr/pwrpolicy.c @@ -20,7 +20,6 @@ #include "boardobj/boardobjgrp.h" #include "boardobj/boardobjgrp_e32.h" #include "gm206/bios_gm206.h" -#include "gk20a/pmu_gk20a.h" #define _pwr_policy_limitarboutputget_helper(p_limit_arb) (p_limit_arb)->output #define _pwr_policy_limitdeltaapply(limit, delta) ((u32)max(((s32)limit) + (delta), 0)) diff --git a/drivers/gpu/nvgpu/therm/thrmchannel.c b/drivers/gpu/nvgpu/therm/thrmchannel.c index 556bad86..da2fbc9e 100644 --- a/drivers/gpu/nvgpu/therm/thrmchannel.c +++ b/drivers/gpu/nvgpu/therm/thrmchannel.c @@ -12,14 +12,13 @@ */ #include +#include #include "gk20a/gk20a.h" #include "thrmchannel.h" #include "boardobj/boardobjgrp.h" #include "boardobj/boardobjgrp_e32.h" -#include #include "gm206/bios_gm206.h" -#include "gk20a/pmu_gk20a.h" static u32 _therm_channel_pmudatainit_device(struct gk20a *g, struct boardobj *board_obj_ptr, diff --git a/drivers/gpu/nvgpu/therm/thrmdev.c b/drivers/gpu/nvgpu/therm/thrmdev.c index 4772b628..782939f0 100644 --- a/drivers/gpu/nvgpu/therm/thrmdev.c +++ b/drivers/gpu/nvgpu/therm/thrmdev.c @@ -12,14 +12,13 @@ */ #include +#include #include "gk20a/gk20a.h" #include "thrmdev.h" #include "boardobj/boardobjgrp.h" #include "boardobj/boardobjgrp_e32.h" -#include #include "gm206/bios_gm206.h" -#include "gk20a/pmu_gk20a.h" #include "ctrl/ctrltherm.h" static struct boardobj *construct_therm_device(struct gk20a *g, diff --git a/drivers/gpu/nvgpu/volt/volt_dev.c b/drivers/gpu/nvgpu/volt/volt_dev.c index 4617b241..e62a8dcf 100644 --- a/drivers/gpu/nvgpu/volt/volt_dev.c +++ b/drivers/gpu/nvgpu/volt/volt_dev.c @@ -17,7 +17,6 @@ #include #include "gk20a/gk20a.h" -#include "gk20a/pmu_gk20a.h" #include "gm206/bios_gm206.h" #include "boardobj/boardobjgrp.h" diff --git a/drivers/gpu/nvgpu/volt/volt_pmu.c b/drivers/gpu/nvgpu/volt/volt_pmu.c index 871afce5..f9d421fc 100644 --- a/drivers/gpu/nvgpu/volt/volt_pmu.c +++ b/drivers/gpu/nvgpu/volt/volt_pmu.c @@ -11,16 +11,17 @@ * more details. */ +#include +#include + #include "gk20a/gk20a.h" #include "boardobj/boardobjgrp.h" #include "boardobj/boardobjgrp_e32.h" #include "gm206/bios_gm206.h" #include "ctrl/ctrlvolt.h" #include "ctrl/ctrlperf.h" -#include "gk20a/pmu_gk20a.h" #include "volt.h" -#include #define RAIL_COUNT 2 diff --git a/drivers/gpu/nvgpu/volt/volt_policy.c b/drivers/gpu/nvgpu/volt/volt_policy.c index ced9dd03..900888ac 100644 --- a/drivers/gpu/nvgpu/volt/volt_policy.c +++ b/drivers/gpu/nvgpu/volt/volt_policy.c @@ -18,7 +18,6 @@ #include "boardobj/boardobjgrp_e32.h" #include "gm206/bios_gm206.h" #include "ctrl/ctrlvolt.h" -#include "gk20a/pmu_gk20a.h" #include "volt.h" diff --git a/drivers/gpu/nvgpu/volt/volt_rail.c b/drivers/gpu/nvgpu/volt/volt_rail.c index 778c531e..a4a8016a 100644 --- a/drivers/gpu/nvgpu/volt/volt_rail.c +++ b/drivers/gpu/nvgpu/volt/volt_rail.c @@ -18,7 +18,6 @@ #include "boardobj/boardobjgrp_e32.h" #include "gm206/bios_gm206.h" #include "ctrl/ctrlvolt.h" -#include "gk20a/pmu_gk20a.h" #include "volt.h" -- cgit v1.2.2