author | Mahantesh Kumbar <mkumbar@nvidia.com> | 2017-05-10 11:05:24 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-06-05 02:05:18 -0400 |
commit | 673dd971600b26131c0afdb221e13c080da022fd (patch) | |
tree | 7c8416ac2ef61891812773d55c8c8dc61da824aa /drivers/gpu/nvgpu/gk20a | |
parent | 7668ccb2a2e4a8c13d82b427c65be79c725afe08 (diff) |
gpu: nvgpu: moved & renamed "struct pmu_gk20a"
- Renamed "struct pmu_gk20a" to "struct nvgpu_pmu" then moved
to file "pmu.h" under folder "drivers/gpu/nvgpu/include/nvgpu/"
- Included header file "pmu.h" to dependent file &
removed "pmu_gk20a.h" include if its usage is not present.
- Replaced "struct pmu_gk20a" with "struct nvgpu_pmu" in dependent
source & header files.
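A minimal, self-contained sketch of the layout this change preserves is below. The field names (pmu_state, power_on) are hypothetical stand-ins and container_of() is spelled out so the snippet builds outside the kernel tree; the point is only that the PMU struct, under its new name, stays embedded by value in struct gk20a, so gk20a_from_pmu() keeps recovering the parent device with container_of().

```c
/* Sketch only -- not the real gk20a.h / pmu.h contents. */
#include <stddef.h>
#include <stdio.h>

/* Non-type-checking container_of(), spelled out for a userspace build. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct nvgpu_pmu {              /* was: struct pmu_gk20a */
	int pmu_state;          /* stand-in field */
};

struct gk20a {
	int power_on;           /* stand-in field */
	struct nvgpu_pmu pmu;   /* still embedded by value */
};

/* Same pattern as in gk20a.h; only the parameter type is renamed. */
static struct gk20a *gk20a_from_pmu(struct nvgpu_pmu *pmu)
{
	return container_of(pmu, struct gk20a, pmu);
}

int main(void)
{
	struct gk20a g = { .power_on = 1 };
	struct nvgpu_pmu *pmu = &g.pmu;

	/* Dependent code now passes struct nvgpu_pmu * around and can
	 * still reach the owning gk20a. */
	printf("power_on = %d\n", gk20a_from_pmu(pmu)->power_on);
	return 0;
}
```

As the gk20a.h hunk below shows, only the type name in the gk20a_from_pmu() prototype changes; the container_of() body is untouched.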
JIRA NVGPU-56
Change-Id: Ia3c606616831027093d5c216959c6a40d7c2632e
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: http://git-master/r/1479209
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/gk20a.h | 53
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/gk20a_scale.c | 4
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c | 2
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/gr_gk20a.c | 2
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | 318
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/pmu_gk20a.h | 324
6 files changed, 207 insertions, 496 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 89b414be..37e2e185 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -50,6 +50,7 @@ struct gk20a_debug_output; | |||
50 | #include <nvgpu/acr/nvgpu_acr.h> | 50 | #include <nvgpu/acr/nvgpu_acr.h> |
51 | #include <nvgpu/kref.h> | 51 | #include <nvgpu/kref.h> |
52 | #include <nvgpu/falcon.h> | 52 | #include <nvgpu/falcon.h> |
53 | #include <nvgpu/pmu.h> | ||
53 | 54 | ||
54 | #include "clk_gk20a.h" | 55 | #include "clk_gk20a.h" |
55 | #include "ce2_gk20a.h" | 56 | #include "ce2_gk20a.h" |
@@ -523,33 +524,33 @@ struct gpu_ops { | |||
523 | /*used for change of enum zbc update cmd id from ver 0 to ver1*/ | 524 | /*used for change of enum zbc update cmd id from ver 0 to ver1*/ |
524 | u32 cmd_id_zbc_table_update; | 525 | u32 cmd_id_zbc_table_update; |
525 | bool is_pmu_zbc_save_supported; | 526 | bool is_pmu_zbc_save_supported; |
526 | u32 (*get_pmu_cmdline_args_size)(struct pmu_gk20a *pmu); | 527 | u32 (*get_pmu_cmdline_args_size)(struct nvgpu_pmu *pmu); |
527 | void (*set_pmu_cmdline_args_cpu_freq)(struct pmu_gk20a *pmu, | 528 | void (*set_pmu_cmdline_args_cpu_freq)(struct nvgpu_pmu *pmu, |
528 | u32 freq); | 529 | u32 freq); |
529 | void (*set_pmu_cmdline_args_trace_size)(struct pmu_gk20a *pmu, | 530 | void (*set_pmu_cmdline_args_trace_size)(struct nvgpu_pmu *pmu, |
530 | u32 size); | 531 | u32 size); |
531 | void (*set_pmu_cmdline_args_trace_dma_base)( | 532 | void (*set_pmu_cmdline_args_trace_dma_base)( |
532 | struct pmu_gk20a *pmu); | 533 | struct nvgpu_pmu *pmu); |
533 | void (*set_pmu_cmdline_args_trace_dma_idx)( | 534 | void (*set_pmu_cmdline_args_trace_dma_idx)( |
534 | struct pmu_gk20a *pmu, u32 idx); | 535 | struct nvgpu_pmu *pmu, u32 idx); |
535 | void * (*get_pmu_cmdline_args_ptr)(struct pmu_gk20a *pmu); | 536 | void * (*get_pmu_cmdline_args_ptr)(struct nvgpu_pmu *pmu); |
536 | u32 (*get_pmu_allocation_struct_size)(struct pmu_gk20a *pmu); | 537 | u32 (*get_pmu_allocation_struct_size)(struct nvgpu_pmu *pmu); |
537 | void (*set_pmu_allocation_ptr)(struct pmu_gk20a *pmu, | 538 | void (*set_pmu_allocation_ptr)(struct nvgpu_pmu *pmu, |
538 | void **pmu_alloc_ptr, void *assign_ptr); | 539 | void **pmu_alloc_ptr, void *assign_ptr); |
539 | void (*pmu_allocation_set_dmem_size)(struct pmu_gk20a *pmu, | 540 | void (*pmu_allocation_set_dmem_size)(struct nvgpu_pmu *pmu, |
540 | void *pmu_alloc_ptr, u16 size); | 541 | void *pmu_alloc_ptr, u16 size); |
541 | u16 (*pmu_allocation_get_dmem_size)(struct pmu_gk20a *pmu, | 542 | u16 (*pmu_allocation_get_dmem_size)(struct nvgpu_pmu *pmu, |
542 | void *pmu_alloc_ptr); | 543 | void *pmu_alloc_ptr); |
543 | u32 (*pmu_allocation_get_dmem_offset)(struct pmu_gk20a *pmu, | 544 | u32 (*pmu_allocation_get_dmem_offset)(struct nvgpu_pmu *pmu, |
544 | void *pmu_alloc_ptr); | 545 | void *pmu_alloc_ptr); |
545 | u32 * (*pmu_allocation_get_dmem_offset_addr)( | 546 | u32 * (*pmu_allocation_get_dmem_offset_addr)( |
546 | struct pmu_gk20a *pmu, void *pmu_alloc_ptr); | 547 | struct nvgpu_pmu *pmu, void *pmu_alloc_ptr); |
547 | void (*pmu_allocation_set_dmem_offset)(struct pmu_gk20a *pmu, | 548 | void (*pmu_allocation_set_dmem_offset)(struct nvgpu_pmu *pmu, |
548 | void *pmu_alloc_ptr, u32 offset); | 549 | void *pmu_alloc_ptr, u32 offset); |
549 | void * (*pmu_allocation_get_fb_addr)( | 550 | void * (*pmu_allocation_get_fb_addr)( |
550 | struct pmu_gk20a *pmu, void *pmu_alloc_ptr); | 551 | struct nvgpu_pmu *pmu, void *pmu_alloc_ptr); |
551 | u32 (*pmu_allocation_get_fb_size)( | 552 | u32 (*pmu_allocation_get_fb_size)( |
552 | struct pmu_gk20a *pmu, void *pmu_alloc_ptr); | 553 | struct nvgpu_pmu *pmu, void *pmu_alloc_ptr); |
553 | void (*get_pmu_init_msg_pmu_queue_params)( | 554 | void (*get_pmu_init_msg_pmu_queue_params)( |
554 | struct pmu_queue *queue, u32 id, | 555 | struct pmu_queue *queue, u32 id, |
555 | void *pmu_init_msg); | 556 | void *pmu_init_msg); |
@@ -590,15 +591,15 @@ struct gpu_ops { | |||
590 | struct pmu_sequence *seq); | 591 | struct pmu_sequence *seq); |
591 | void *(*get_pmu_seq_out_a_ptr)( | 592 | void *(*get_pmu_seq_out_a_ptr)( |
592 | struct pmu_sequence *seq); | 593 | struct pmu_sequence *seq); |
593 | void (*set_pmu_cmdline_args_secure_mode)(struct pmu_gk20a *pmu, | 594 | void (*set_pmu_cmdline_args_secure_mode)(struct nvgpu_pmu *pmu, |
594 | u32 val); | 595 | u32 val); |
595 | u32 (*get_perfmon_cntr_sz)(struct pmu_gk20a *pmu); | 596 | u32 (*get_perfmon_cntr_sz)(struct nvgpu_pmu *pmu); |
596 | void * (*get_perfmon_cntr_ptr)(struct pmu_gk20a *pmu); | 597 | void * (*get_perfmon_cntr_ptr)(struct nvgpu_pmu *pmu); |
597 | void (*set_perfmon_cntr_ut)(struct pmu_gk20a *pmu, u16 ut); | 598 | void (*set_perfmon_cntr_ut)(struct nvgpu_pmu *pmu, u16 ut); |
598 | void (*set_perfmon_cntr_lt)(struct pmu_gk20a *pmu, u16 lt); | 599 | void (*set_perfmon_cntr_lt)(struct nvgpu_pmu *pmu, u16 lt); |
599 | void (*set_perfmon_cntr_valid)(struct pmu_gk20a *pmu, u8 val); | 600 | void (*set_perfmon_cntr_valid)(struct nvgpu_pmu *pmu, u8 val); |
600 | void (*set_perfmon_cntr_index)(struct pmu_gk20a *pmu, u8 val); | 601 | void (*set_perfmon_cntr_index)(struct nvgpu_pmu *pmu, u8 val); |
601 | void (*set_perfmon_cntr_group_id)(struct pmu_gk20a *pmu, | 602 | void (*set_perfmon_cntr_group_id)(struct nvgpu_pmu *pmu, |
602 | u8 gid); | 603 | u8 gid); |
603 | 604 | ||
604 | u8 (*pg_cmd_eng_buf_load_size)(struct pmu_pg_cmd *pg); | 605 | u8 (*pg_cmd_eng_buf_load_size)(struct pmu_pg_cmd *pg); |
@@ -728,7 +729,7 @@ struct gpu_ops { | |||
728 | bool (*is_pmu_supported)(struct gk20a *g); | 729 | bool (*is_pmu_supported)(struct gk20a *g); |
729 | int (*prepare_ucode)(struct gk20a *g); | 730 | int (*prepare_ucode)(struct gk20a *g); |
730 | int (*pmu_setup_hw_and_bootstrap)(struct gk20a *g); | 731 | int (*pmu_setup_hw_and_bootstrap)(struct gk20a *g); |
731 | int (*pmu_nsbootstrap)(struct pmu_gk20a *pmu); | 732 | int (*pmu_nsbootstrap)(struct nvgpu_pmu *pmu); |
732 | int (*pmu_setup_elpg)(struct gk20a *g); | 733 | int (*pmu_setup_elpg)(struct gk20a *g); |
733 | u32 (*pmu_get_queue_head)(u32 i); | 734 | u32 (*pmu_get_queue_head)(u32 i); |
734 | u32 (*pmu_get_queue_head_size)(void); | 735 | u32 (*pmu_get_queue_head_size)(void); |
@@ -1014,7 +1015,7 @@ struct gk20a { | |||
1014 | struct gr_gk20a gr; | 1015 | struct gr_gk20a gr; |
1015 | struct sim_gk20a sim; | 1016 | struct sim_gk20a sim; |
1016 | struct mm_gk20a mm; | 1017 | struct mm_gk20a mm; |
1017 | struct pmu_gk20a pmu; | 1018 | struct nvgpu_pmu pmu; |
1018 | struct acr_desc acr; | 1019 | struct acr_desc acr; |
1019 | struct ecc_gk20a ecc; | 1020 | struct ecc_gk20a ecc; |
1020 | struct cooling_device_gk20a gk20a_cdev; | 1021 | struct cooling_device_gk20a gk20a_cdev; |
@@ -1396,7 +1397,7 @@ static inline struct gk20a *gk20a_from_as(struct gk20a_as *as) | |||
1396 | { | 1397 | { |
1397 | return container_of(as, struct gk20a, as); | 1398 | return container_of(as, struct gk20a, as); |
1398 | } | 1399 | } |
1399 | static inline struct gk20a *gk20a_from_pmu(struct pmu_gk20a *pmu) | 1400 | static inline struct gk20a *gk20a_from_pmu(struct nvgpu_pmu *pmu) |
1400 | { | 1401 | { |
1401 | return container_of(pmu, struct gk20a, pmu); | 1402 | return container_of(pmu, struct gk20a, pmu); |
1402 | } | 1403 | } |
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_scale.c b/drivers/gpu/nvgpu/gk20a/gk20a_scale.c
index 608b2398..c23cdcba 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a_scale.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a_scale.c
@@ -24,15 +24,13 @@ | |||
24 | #include <governor.h> | 24 | #include <governor.h> |
25 | 25 | ||
26 | #include <nvgpu/kmem.h> | 26 | #include <nvgpu/kmem.h> |
27 | #include <nvgpu/log.h> | ||
27 | 28 | ||
28 | #include "gk20a.h" | 29 | #include "gk20a.h" |
29 | #include "platform_gk20a.h" | 30 | #include "platform_gk20a.h" |
30 | #include "pmu_gk20a.h" | ||
31 | #include "clk_gk20a.h" | 31 | #include "clk_gk20a.h" |
32 | #include "gk20a_scale.h" | 32 | #include "gk20a_scale.h" |
33 | 33 | ||
34 | #include <nvgpu/log.h> | ||
35 | |||
36 | /* | 34 | /* |
37 | * gk20a_scale_qos_notify() | 35 | * gk20a_scale_qos_notify() |
38 | * | 36 | * |
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c b/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c
index 4bb8304a..1065968b 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c
@@ -491,7 +491,7 @@ static ssize_t mscg_enable_store(struct device *dev, | |||
491 | struct device_attribute *attr, const char *buf, size_t count) | 491 | struct device_attribute *attr, const char *buf, size_t count) |
492 | { | 492 | { |
493 | struct gk20a *g = get_gk20a(dev); | 493 | struct gk20a *g = get_gk20a(dev); |
494 | struct pmu_gk20a *pmu = &g->pmu; | 494 | struct nvgpu_pmu *pmu = &g->pmu; |
495 | unsigned long val = 0; | 495 | unsigned long val = 0; |
496 | int err; | 496 | int err; |
497 | 497 | ||
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 3b46b807..2cf55119 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -5193,7 +5193,7 @@ clean_up: | |||
5193 | 5193 | ||
5194 | static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g) | 5194 | static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g) |
5195 | { | 5195 | { |
5196 | struct pmu_gk20a *pmu = &g->pmu; | 5196 | struct nvgpu_pmu *pmu = &g->pmu; |
5197 | struct mm_gk20a *mm = &g->mm; | 5197 | struct mm_gk20a *mm = &g->mm; |
5198 | struct vm_gk20a *vm = &mm->pmu.vm; | 5198 | struct vm_gk20a *vm = &mm->pmu.vm; |
5199 | int err = 0; | 5199 | int err = 0; |
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index fc46db91..e74a5264 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -56,171 +56,171 @@ static void nvgpu_pmu_state_change(struct gk20a *g, u32 pmu_state, | |||
56 | 56 | ||
57 | static int pmu_init_powergating(struct gk20a *g); | 57 | static int pmu_init_powergating(struct gk20a *g); |
58 | 58 | ||
59 | static u32 pmu_perfmon_cntr_sz_v0(struct pmu_gk20a *pmu) | 59 | static u32 pmu_perfmon_cntr_sz_v0(struct nvgpu_pmu *pmu) |
60 | { | 60 | { |
61 | return sizeof(struct pmu_perfmon_counter_v0); | 61 | return sizeof(struct pmu_perfmon_counter_v0); |
62 | } | 62 | } |
63 | 63 | ||
64 | static u32 pmu_perfmon_cntr_sz_v2(struct pmu_gk20a *pmu) | 64 | static u32 pmu_perfmon_cntr_sz_v2(struct nvgpu_pmu *pmu) |
65 | { | 65 | { |
66 | return sizeof(struct pmu_perfmon_counter_v2); | 66 | return sizeof(struct pmu_perfmon_counter_v2); |
67 | } | 67 | } |
68 | 68 | ||
69 | static void *get_perfmon_cntr_ptr_v2(struct pmu_gk20a *pmu) | 69 | static void *get_perfmon_cntr_ptr_v2(struct nvgpu_pmu *pmu) |
70 | { | 70 | { |
71 | return (void *)(&pmu->perfmon_counter_v2); | 71 | return (void *)(&pmu->perfmon_counter_v2); |
72 | } | 72 | } |
73 | 73 | ||
74 | static void *get_perfmon_cntr_ptr_v0(struct pmu_gk20a *pmu) | 74 | static void *get_perfmon_cntr_ptr_v0(struct nvgpu_pmu *pmu) |
75 | { | 75 | { |
76 | return (void *)(&pmu->perfmon_counter_v0); | 76 | return (void *)(&pmu->perfmon_counter_v0); |
77 | } | 77 | } |
78 | 78 | ||
79 | static void set_perfmon_cntr_ut_v2(struct pmu_gk20a *pmu, u16 ut) | 79 | static void set_perfmon_cntr_ut_v2(struct nvgpu_pmu *pmu, u16 ut) |
80 | { | 80 | { |
81 | pmu->perfmon_counter_v2.upper_threshold = ut; | 81 | pmu->perfmon_counter_v2.upper_threshold = ut; |
82 | } | 82 | } |
83 | 83 | ||
84 | static void set_perfmon_cntr_ut_v0(struct pmu_gk20a *pmu, u16 ut) | 84 | static void set_perfmon_cntr_ut_v0(struct nvgpu_pmu *pmu, u16 ut) |
85 | { | 85 | { |
86 | pmu->perfmon_counter_v0.upper_threshold = ut; | 86 | pmu->perfmon_counter_v0.upper_threshold = ut; |
87 | } | 87 | } |
88 | 88 | ||
89 | static void set_perfmon_cntr_lt_v2(struct pmu_gk20a *pmu, u16 lt) | 89 | static void set_perfmon_cntr_lt_v2(struct nvgpu_pmu *pmu, u16 lt) |
90 | { | 90 | { |
91 | pmu->perfmon_counter_v2.lower_threshold = lt; | 91 | pmu->perfmon_counter_v2.lower_threshold = lt; |
92 | } | 92 | } |
93 | 93 | ||
94 | static void set_perfmon_cntr_lt_v0(struct pmu_gk20a *pmu, u16 lt) | 94 | static void set_perfmon_cntr_lt_v0(struct nvgpu_pmu *pmu, u16 lt) |
95 | { | 95 | { |
96 | pmu->perfmon_counter_v0.lower_threshold = lt; | 96 | pmu->perfmon_counter_v0.lower_threshold = lt; |
97 | } | 97 | } |
98 | 98 | ||
99 | static void set_perfmon_cntr_valid_v2(struct pmu_gk20a *pmu, u8 valid) | 99 | static void set_perfmon_cntr_valid_v2(struct nvgpu_pmu *pmu, u8 valid) |
100 | { | 100 | { |
101 | pmu->perfmon_counter_v2.valid = valid; | 101 | pmu->perfmon_counter_v2.valid = valid; |
102 | } | 102 | } |
103 | 103 | ||
104 | static void set_perfmon_cntr_valid_v0(struct pmu_gk20a *pmu, u8 valid) | 104 | static void set_perfmon_cntr_valid_v0(struct nvgpu_pmu *pmu, u8 valid) |
105 | { | 105 | { |
106 | pmu->perfmon_counter_v0.valid = valid; | 106 | pmu->perfmon_counter_v0.valid = valid; |
107 | } | 107 | } |
108 | 108 | ||
109 | static void set_perfmon_cntr_index_v2(struct pmu_gk20a *pmu, u8 index) | 109 | static void set_perfmon_cntr_index_v2(struct nvgpu_pmu *pmu, u8 index) |
110 | { | 110 | { |
111 | pmu->perfmon_counter_v2.index = index; | 111 | pmu->perfmon_counter_v2.index = index; |
112 | } | 112 | } |
113 | 113 | ||
114 | static void set_perfmon_cntr_index_v0(struct pmu_gk20a *pmu, u8 index) | 114 | static void set_perfmon_cntr_index_v0(struct nvgpu_pmu *pmu, u8 index) |
115 | { | 115 | { |
116 | pmu->perfmon_counter_v0.index = index; | 116 | pmu->perfmon_counter_v0.index = index; |
117 | } | 117 | } |
118 | 118 | ||
119 | static void set_perfmon_cntr_group_id_v2(struct pmu_gk20a *pmu, u8 gid) | 119 | static void set_perfmon_cntr_group_id_v2(struct nvgpu_pmu *pmu, u8 gid) |
120 | { | 120 | { |
121 | pmu->perfmon_counter_v2.group_id = gid; | 121 | pmu->perfmon_counter_v2.group_id = gid; |
122 | } | 122 | } |
123 | 123 | ||
124 | static void set_perfmon_cntr_group_id_v0(struct pmu_gk20a *pmu, u8 gid) | 124 | static void set_perfmon_cntr_group_id_v0(struct nvgpu_pmu *pmu, u8 gid) |
125 | { | 125 | { |
126 | pmu->perfmon_counter_v0.group_id = gid; | 126 | pmu->perfmon_counter_v0.group_id = gid; |
127 | } | 127 | } |
128 | 128 | ||
129 | static u32 pmu_cmdline_size_v0(struct pmu_gk20a *pmu) | 129 | static u32 pmu_cmdline_size_v0(struct nvgpu_pmu *pmu) |
130 | { | 130 | { |
131 | return sizeof(struct pmu_cmdline_args_v0); | 131 | return sizeof(struct pmu_cmdline_args_v0); |
132 | } | 132 | } |
133 | 133 | ||
134 | static u32 pmu_cmdline_size_v1(struct pmu_gk20a *pmu) | 134 | static u32 pmu_cmdline_size_v1(struct nvgpu_pmu *pmu) |
135 | { | 135 | { |
136 | return sizeof(struct pmu_cmdline_args_v1); | 136 | return sizeof(struct pmu_cmdline_args_v1); |
137 | } | 137 | } |
138 | 138 | ||
139 | static u32 pmu_cmdline_size_v2(struct pmu_gk20a *pmu) | 139 | static u32 pmu_cmdline_size_v2(struct nvgpu_pmu *pmu) |
140 | { | 140 | { |
141 | return sizeof(struct pmu_cmdline_args_v2); | 141 | return sizeof(struct pmu_cmdline_args_v2); |
142 | } | 142 | } |
143 | 143 | ||
144 | static void set_pmu_cmdline_args_cpufreq_v2(struct pmu_gk20a *pmu, u32 freq) | 144 | static void set_pmu_cmdline_args_cpufreq_v2(struct nvgpu_pmu *pmu, u32 freq) |
145 | { | 145 | { |
146 | pmu->args_v2.cpu_freq_hz = freq; | 146 | pmu->args_v2.cpu_freq_hz = freq; |
147 | } | 147 | } |
148 | static void set_pmu_cmdline_args_secure_mode_v2(struct pmu_gk20a *pmu, u32 val) | 148 | static void set_pmu_cmdline_args_secure_mode_v2(struct nvgpu_pmu *pmu, u32 val) |
149 | { | 149 | { |
150 | pmu->args_v2.secure_mode = val; | 150 | pmu->args_v2.secure_mode = val; |
151 | } | 151 | } |
152 | 152 | ||
153 | static void set_pmu_cmdline_args_falctracesize_v2( | 153 | static void set_pmu_cmdline_args_falctracesize_v2( |
154 | struct pmu_gk20a *pmu, u32 size) | 154 | struct nvgpu_pmu *pmu, u32 size) |
155 | { | 155 | { |
156 | pmu->args_v2.falc_trace_size = size; | 156 | pmu->args_v2.falc_trace_size = size; |
157 | } | 157 | } |
158 | 158 | ||
159 | static void set_pmu_cmdline_args_falctracedmabase_v2(struct pmu_gk20a *pmu) | 159 | static void set_pmu_cmdline_args_falctracedmabase_v2(struct nvgpu_pmu *pmu) |
160 | { | 160 | { |
161 | pmu->args_v2.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; | 161 | pmu->args_v2.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; |
162 | } | 162 | } |
163 | 163 | ||
164 | static void set_pmu_cmdline_args_falctracedmaidx_v2( | 164 | static void set_pmu_cmdline_args_falctracedmaidx_v2( |
165 | struct pmu_gk20a *pmu, u32 idx) | 165 | struct nvgpu_pmu *pmu, u32 idx) |
166 | { | 166 | { |
167 | pmu->args_v2.falc_trace_dma_idx = idx; | 167 | pmu->args_v2.falc_trace_dma_idx = idx; |
168 | } | 168 | } |
169 | 169 | ||
170 | 170 | ||
171 | static void set_pmu_cmdline_args_falctracedmabase_v4(struct pmu_gk20a *pmu) | 171 | static void set_pmu_cmdline_args_falctracedmabase_v4(struct nvgpu_pmu *pmu) |
172 | { | 172 | { |
173 | pmu->args_v4.dma_addr.dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; | 173 | pmu->args_v4.dma_addr.dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; |
174 | pmu->args_v4.dma_addr.dma_base1 = 0; | 174 | pmu->args_v4.dma_addr.dma_base1 = 0; |
175 | pmu->args_v4.dma_addr.dma_offset = 0; | 175 | pmu->args_v4.dma_addr.dma_offset = 0; |
176 | } | 176 | } |
177 | 177 | ||
178 | static u32 pmu_cmdline_size_v4(struct pmu_gk20a *pmu) | 178 | static u32 pmu_cmdline_size_v4(struct nvgpu_pmu *pmu) |
179 | { | 179 | { |
180 | return sizeof(struct pmu_cmdline_args_v4); | 180 | return sizeof(struct pmu_cmdline_args_v4); |
181 | } | 181 | } |
182 | 182 | ||
183 | static void set_pmu_cmdline_args_cpufreq_v4(struct pmu_gk20a *pmu, u32 freq) | 183 | static void set_pmu_cmdline_args_cpufreq_v4(struct nvgpu_pmu *pmu, u32 freq) |
184 | { | 184 | { |
185 | pmu->args_v4.cpu_freq_hz = freq; | 185 | pmu->args_v4.cpu_freq_hz = freq; |
186 | } | 186 | } |
187 | static void set_pmu_cmdline_args_secure_mode_v4(struct pmu_gk20a *pmu, u32 val) | 187 | static void set_pmu_cmdline_args_secure_mode_v4(struct nvgpu_pmu *pmu, u32 val) |
188 | { | 188 | { |
189 | pmu->args_v4.secure_mode = val; | 189 | pmu->args_v4.secure_mode = val; |
190 | } | 190 | } |
191 | 191 | ||
192 | static void set_pmu_cmdline_args_falctracesize_v4( | 192 | static void set_pmu_cmdline_args_falctracesize_v4( |
193 | struct pmu_gk20a *pmu, u32 size) | 193 | struct nvgpu_pmu *pmu, u32 size) |
194 | { | 194 | { |
195 | pmu->args_v4.falc_trace_size = size; | 195 | pmu->args_v4.falc_trace_size = size; |
196 | } | 196 | } |
197 | static void set_pmu_cmdline_args_falctracedmaidx_v4( | 197 | static void set_pmu_cmdline_args_falctracedmaidx_v4( |
198 | struct pmu_gk20a *pmu, u32 idx) | 198 | struct nvgpu_pmu *pmu, u32 idx) |
199 | { | 199 | { |
200 | pmu->args_v4.falc_trace_dma_idx = idx; | 200 | pmu->args_v4.falc_trace_dma_idx = idx; |
201 | } | 201 | } |
202 | 202 | ||
203 | static u32 pmu_cmdline_size_v5(struct pmu_gk20a *pmu) | 203 | static u32 pmu_cmdline_size_v5(struct nvgpu_pmu *pmu) |
204 | { | 204 | { |
205 | return sizeof(struct pmu_cmdline_args_v5); | 205 | return sizeof(struct pmu_cmdline_args_v5); |
206 | } | 206 | } |
207 | 207 | ||
208 | static void set_pmu_cmdline_args_cpufreq_v5(struct pmu_gk20a *pmu, u32 freq) | 208 | static void set_pmu_cmdline_args_cpufreq_v5(struct nvgpu_pmu *pmu, u32 freq) |
209 | { | 209 | { |
210 | pmu->args_v5.cpu_freq_hz = 204000000; | 210 | pmu->args_v5.cpu_freq_hz = 204000000; |
211 | } | 211 | } |
212 | static void set_pmu_cmdline_args_secure_mode_v5(struct pmu_gk20a *pmu, u32 val) | 212 | static void set_pmu_cmdline_args_secure_mode_v5(struct nvgpu_pmu *pmu, u32 val) |
213 | { | 213 | { |
214 | pmu->args_v5.secure_mode = val; | 214 | pmu->args_v5.secure_mode = val; |
215 | } | 215 | } |
216 | 216 | ||
217 | static void set_pmu_cmdline_args_falctracesize_v5( | 217 | static void set_pmu_cmdline_args_falctracesize_v5( |
218 | struct pmu_gk20a *pmu, u32 size) | 218 | struct nvgpu_pmu *pmu, u32 size) |
219 | { | 219 | { |
220 | /* set by surface describe */ | 220 | /* set by surface describe */ |
221 | } | 221 | } |
222 | 222 | ||
223 | static void set_pmu_cmdline_args_falctracedmabase_v5(struct pmu_gk20a *pmu) | 223 | static void set_pmu_cmdline_args_falctracedmabase_v5(struct nvgpu_pmu *pmu) |
224 | { | 224 | { |
225 | struct gk20a *g = gk20a_from_pmu(pmu); | 225 | struct gk20a *g = gk20a_from_pmu(pmu); |
226 | 226 | ||
@@ -228,53 +228,53 @@ static void set_pmu_cmdline_args_falctracedmabase_v5(struct pmu_gk20a *pmu) | |||
228 | } | 228 | } |
229 | 229 | ||
230 | static void set_pmu_cmdline_args_falctracedmaidx_v5( | 230 | static void set_pmu_cmdline_args_falctracedmaidx_v5( |
231 | struct pmu_gk20a *pmu, u32 idx) | 231 | struct nvgpu_pmu *pmu, u32 idx) |
232 | { | 232 | { |
233 | /* set by surface describe */ | 233 | /* set by surface describe */ |
234 | } | 234 | } |
235 | 235 | ||
236 | static u32 pmu_cmdline_size_v3(struct pmu_gk20a *pmu) | 236 | static u32 pmu_cmdline_size_v3(struct nvgpu_pmu *pmu) |
237 | { | 237 | { |
238 | return sizeof(struct pmu_cmdline_args_v3); | 238 | return sizeof(struct pmu_cmdline_args_v3); |
239 | } | 239 | } |
240 | 240 | ||
241 | static void set_pmu_cmdline_args_cpufreq_v3(struct pmu_gk20a *pmu, u32 freq) | 241 | static void set_pmu_cmdline_args_cpufreq_v3(struct nvgpu_pmu *pmu, u32 freq) |
242 | { | 242 | { |
243 | pmu->args_v3.cpu_freq_hz = freq; | 243 | pmu->args_v3.cpu_freq_hz = freq; |
244 | } | 244 | } |
245 | static void set_pmu_cmdline_args_secure_mode_v3(struct pmu_gk20a *pmu, u32 val) | 245 | static void set_pmu_cmdline_args_secure_mode_v3(struct nvgpu_pmu *pmu, u32 val) |
246 | { | 246 | { |
247 | pmu->args_v3.secure_mode = val; | 247 | pmu->args_v3.secure_mode = val; |
248 | } | 248 | } |
249 | 249 | ||
250 | static void set_pmu_cmdline_args_falctracesize_v3( | 250 | static void set_pmu_cmdline_args_falctracesize_v3( |
251 | struct pmu_gk20a *pmu, u32 size) | 251 | struct nvgpu_pmu *pmu, u32 size) |
252 | { | 252 | { |
253 | pmu->args_v3.falc_trace_size = size; | 253 | pmu->args_v3.falc_trace_size = size; |
254 | } | 254 | } |
255 | 255 | ||
256 | static void set_pmu_cmdline_args_falctracedmabase_v3(struct pmu_gk20a *pmu) | 256 | static void set_pmu_cmdline_args_falctracedmabase_v3(struct nvgpu_pmu *pmu) |
257 | { | 257 | { |
258 | pmu->args_v3.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; | 258 | pmu->args_v3.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; |
259 | } | 259 | } |
260 | 260 | ||
261 | static void set_pmu_cmdline_args_falctracedmaidx_v3( | 261 | static void set_pmu_cmdline_args_falctracedmaidx_v3( |
262 | struct pmu_gk20a *pmu, u32 idx) | 262 | struct nvgpu_pmu *pmu, u32 idx) |
263 | { | 263 | { |
264 | pmu->args_v3.falc_trace_dma_idx = idx; | 264 | pmu->args_v3.falc_trace_dma_idx = idx; |
265 | } | 265 | } |
266 | 266 | ||
267 | static void set_pmu_cmdline_args_cpufreq_v1(struct pmu_gk20a *pmu, u32 freq) | 267 | static void set_pmu_cmdline_args_cpufreq_v1(struct nvgpu_pmu *pmu, u32 freq) |
268 | { | 268 | { |
269 | pmu->args_v1.cpu_freq_hz = freq; | 269 | pmu->args_v1.cpu_freq_hz = freq; |
270 | } | 270 | } |
271 | static void set_pmu_cmdline_args_secure_mode_v1(struct pmu_gk20a *pmu, u32 val) | 271 | static void set_pmu_cmdline_args_secure_mode_v1(struct nvgpu_pmu *pmu, u32 val) |
272 | { | 272 | { |
273 | pmu->args_v1.secure_mode = val; | 273 | pmu->args_v1.secure_mode = val; |
274 | } | 274 | } |
275 | 275 | ||
276 | static void set_pmu_cmdline_args_falctracesize_v1( | 276 | static void set_pmu_cmdline_args_falctracesize_v1( |
277 | struct pmu_gk20a *pmu, u32 size) | 277 | struct nvgpu_pmu *pmu, u32 size) |
278 | { | 278 | { |
279 | pmu->args_v1.falc_trace_size = size; | 279 | pmu->args_v1.falc_trace_size = size; |
280 | } | 280 | } |
@@ -293,7 +293,7 @@ bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos) | |||
293 | return false; | 293 | return false; |
294 | } | 294 | } |
295 | 295 | ||
296 | static void printtrace(struct pmu_gk20a *pmu) | 296 | static void printtrace(struct nvgpu_pmu *pmu) |
297 | { | 297 | { |
298 | u32 i = 0, j = 0, k, l, m, count; | 298 | u32 i = 0, j = 0, k, l, m, count; |
299 | char part_str[40], buf[0x40]; | 299 | char part_str[40], buf[0x40]; |
@@ -340,72 +340,72 @@ static void printtrace(struct pmu_gk20a *pmu) | |||
340 | nvgpu_kfree(g, tracebuffer); | 340 | nvgpu_kfree(g, tracebuffer); |
341 | } | 341 | } |
342 | 342 | ||
343 | static void set_pmu_cmdline_args_falctracedmabase_v1(struct pmu_gk20a *pmu) | 343 | static void set_pmu_cmdline_args_falctracedmabase_v1(struct nvgpu_pmu *pmu) |
344 | { | 344 | { |
345 | pmu->args_v1.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; | 345 | pmu->args_v1.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; |
346 | } | 346 | } |
347 | 347 | ||
348 | static void set_pmu_cmdline_args_falctracedmaidx_v1( | 348 | static void set_pmu_cmdline_args_falctracedmaidx_v1( |
349 | struct pmu_gk20a *pmu, u32 idx) | 349 | struct nvgpu_pmu *pmu, u32 idx) |
350 | { | 350 | { |
351 | pmu->args_v1.falc_trace_dma_idx = idx; | 351 | pmu->args_v1.falc_trace_dma_idx = idx; |
352 | } | 352 | } |
353 | 353 | ||
354 | static void set_pmu_cmdline_args_cpufreq_v0(struct pmu_gk20a *pmu, u32 freq) | 354 | static void set_pmu_cmdline_args_cpufreq_v0(struct nvgpu_pmu *pmu, u32 freq) |
355 | { | 355 | { |
356 | pmu->args_v0.cpu_freq_hz = freq; | 356 | pmu->args_v0.cpu_freq_hz = freq; |
357 | } | 357 | } |
358 | 358 | ||
359 | static void *get_pmu_cmdline_args_ptr_v4(struct pmu_gk20a *pmu) | 359 | static void *get_pmu_cmdline_args_ptr_v4(struct nvgpu_pmu *pmu) |
360 | { | 360 | { |
361 | return (void *)(&pmu->args_v4); | 361 | return (void *)(&pmu->args_v4); |
362 | } | 362 | } |
363 | 363 | ||
364 | static void *get_pmu_cmdline_args_ptr_v3(struct pmu_gk20a *pmu) | 364 | static void *get_pmu_cmdline_args_ptr_v3(struct nvgpu_pmu *pmu) |
365 | { | 365 | { |
366 | return (void *)(&pmu->args_v3); | 366 | return (void *)(&pmu->args_v3); |
367 | } | 367 | } |
368 | 368 | ||
369 | static void *get_pmu_cmdline_args_ptr_v2(struct pmu_gk20a *pmu) | 369 | static void *get_pmu_cmdline_args_ptr_v2(struct nvgpu_pmu *pmu) |
370 | { | 370 | { |
371 | return (void *)(&pmu->args_v2); | 371 | return (void *)(&pmu->args_v2); |
372 | } | 372 | } |
373 | 373 | ||
374 | static void *get_pmu_cmdline_args_ptr_v5(struct pmu_gk20a *pmu) | 374 | static void *get_pmu_cmdline_args_ptr_v5(struct nvgpu_pmu *pmu) |
375 | { | 375 | { |
376 | return (void *)(&pmu->args_v5); | 376 | return (void *)(&pmu->args_v5); |
377 | } | 377 | } |
378 | static void *get_pmu_cmdline_args_ptr_v1(struct pmu_gk20a *pmu) | 378 | static void *get_pmu_cmdline_args_ptr_v1(struct nvgpu_pmu *pmu) |
379 | { | 379 | { |
380 | return (void *)(&pmu->args_v1); | 380 | return (void *)(&pmu->args_v1); |
381 | } | 381 | } |
382 | 382 | ||
383 | static void *get_pmu_cmdline_args_ptr_v0(struct pmu_gk20a *pmu) | 383 | static void *get_pmu_cmdline_args_ptr_v0(struct nvgpu_pmu *pmu) |
384 | { | 384 | { |
385 | return (void *)(&pmu->args_v0); | 385 | return (void *)(&pmu->args_v0); |
386 | } | 386 | } |
387 | 387 | ||
388 | static u32 get_pmu_allocation_size_v3(struct pmu_gk20a *pmu) | 388 | static u32 get_pmu_allocation_size_v3(struct nvgpu_pmu *pmu) |
389 | { | 389 | { |
390 | return sizeof(struct pmu_allocation_v3); | 390 | return sizeof(struct pmu_allocation_v3); |
391 | } | 391 | } |
392 | 392 | ||
393 | static u32 get_pmu_allocation_size_v2(struct pmu_gk20a *pmu) | 393 | static u32 get_pmu_allocation_size_v2(struct nvgpu_pmu *pmu) |
394 | { | 394 | { |
395 | return sizeof(struct pmu_allocation_v2); | 395 | return sizeof(struct pmu_allocation_v2); |
396 | } | 396 | } |
397 | 397 | ||
398 | static u32 get_pmu_allocation_size_v1(struct pmu_gk20a *pmu) | 398 | static u32 get_pmu_allocation_size_v1(struct nvgpu_pmu *pmu) |
399 | { | 399 | { |
400 | return sizeof(struct pmu_allocation_v1); | 400 | return sizeof(struct pmu_allocation_v1); |
401 | } | 401 | } |
402 | 402 | ||
403 | static u32 get_pmu_allocation_size_v0(struct pmu_gk20a *pmu) | 403 | static u32 get_pmu_allocation_size_v0(struct nvgpu_pmu *pmu) |
404 | { | 404 | { |
405 | return sizeof(struct pmu_allocation_v0); | 405 | return sizeof(struct pmu_allocation_v0); |
406 | } | 406 | } |
407 | 407 | ||
408 | static void set_pmu_allocation_ptr_v3(struct pmu_gk20a *pmu, | 408 | static void set_pmu_allocation_ptr_v3(struct nvgpu_pmu *pmu, |
409 | void **pmu_alloc_ptr, void *assign_ptr) | 409 | void **pmu_alloc_ptr, void *assign_ptr) |
410 | { | 410 | { |
411 | struct pmu_allocation_v3 **pmu_a_ptr = | 411 | struct pmu_allocation_v3 **pmu_a_ptr = |
@@ -413,7 +413,7 @@ static void set_pmu_allocation_ptr_v3(struct pmu_gk20a *pmu, | |||
413 | *pmu_a_ptr = (struct pmu_allocation_v3 *)assign_ptr; | 413 | *pmu_a_ptr = (struct pmu_allocation_v3 *)assign_ptr; |
414 | } | 414 | } |
415 | 415 | ||
416 | static void set_pmu_allocation_ptr_v2(struct pmu_gk20a *pmu, | 416 | static void set_pmu_allocation_ptr_v2(struct nvgpu_pmu *pmu, |
417 | void **pmu_alloc_ptr, void *assign_ptr) | 417 | void **pmu_alloc_ptr, void *assign_ptr) |
418 | { | 418 | { |
419 | struct pmu_allocation_v2 **pmu_a_ptr = | 419 | struct pmu_allocation_v2 **pmu_a_ptr = |
@@ -421,7 +421,7 @@ static void set_pmu_allocation_ptr_v2(struct pmu_gk20a *pmu, | |||
421 | *pmu_a_ptr = (struct pmu_allocation_v2 *)assign_ptr; | 421 | *pmu_a_ptr = (struct pmu_allocation_v2 *)assign_ptr; |
422 | } | 422 | } |
423 | 423 | ||
424 | static void set_pmu_allocation_ptr_v1(struct pmu_gk20a *pmu, | 424 | static void set_pmu_allocation_ptr_v1(struct nvgpu_pmu *pmu, |
425 | void **pmu_alloc_ptr, void *assign_ptr) | 425 | void **pmu_alloc_ptr, void *assign_ptr) |
426 | { | 426 | { |
427 | struct pmu_allocation_v1 **pmu_a_ptr = | 427 | struct pmu_allocation_v1 **pmu_a_ptr = |
@@ -429,7 +429,7 @@ static void set_pmu_allocation_ptr_v1(struct pmu_gk20a *pmu, | |||
429 | *pmu_a_ptr = (struct pmu_allocation_v1 *)assign_ptr; | 429 | *pmu_a_ptr = (struct pmu_allocation_v1 *)assign_ptr; |
430 | } | 430 | } |
431 | 431 | ||
432 | static void set_pmu_allocation_ptr_v0(struct pmu_gk20a *pmu, | 432 | static void set_pmu_allocation_ptr_v0(struct nvgpu_pmu *pmu, |
433 | void **pmu_alloc_ptr, void *assign_ptr) | 433 | void **pmu_alloc_ptr, void *assign_ptr) |
434 | { | 434 | { |
435 | struct pmu_allocation_v0 **pmu_a_ptr = | 435 | struct pmu_allocation_v0 **pmu_a_ptr = |
@@ -437,7 +437,7 @@ static void set_pmu_allocation_ptr_v0(struct pmu_gk20a *pmu, | |||
437 | *pmu_a_ptr = (struct pmu_allocation_v0 *)assign_ptr; | 437 | *pmu_a_ptr = (struct pmu_allocation_v0 *)assign_ptr; |
438 | } | 438 | } |
439 | 439 | ||
440 | static void pmu_allocation_set_dmem_size_v3(struct pmu_gk20a *pmu, | 440 | static void pmu_allocation_set_dmem_size_v3(struct nvgpu_pmu *pmu, |
441 | void *pmu_alloc_ptr, u16 size) | 441 | void *pmu_alloc_ptr, u16 size) |
442 | { | 442 | { |
443 | struct pmu_allocation_v3 *pmu_a_ptr = | 443 | struct pmu_allocation_v3 *pmu_a_ptr = |
@@ -445,7 +445,7 @@ static void pmu_allocation_set_dmem_size_v3(struct pmu_gk20a *pmu, | |||
445 | pmu_a_ptr->alloc.dmem.size = size; | 445 | pmu_a_ptr->alloc.dmem.size = size; |
446 | } | 446 | } |
447 | 447 | ||
448 | static void pmu_allocation_set_dmem_size_v2(struct pmu_gk20a *pmu, | 448 | static void pmu_allocation_set_dmem_size_v2(struct nvgpu_pmu *pmu, |
449 | void *pmu_alloc_ptr, u16 size) | 449 | void *pmu_alloc_ptr, u16 size) |
450 | { | 450 | { |
451 | struct pmu_allocation_v2 *pmu_a_ptr = | 451 | struct pmu_allocation_v2 *pmu_a_ptr = |
@@ -453,7 +453,7 @@ static void pmu_allocation_set_dmem_size_v2(struct pmu_gk20a *pmu, | |||
453 | pmu_a_ptr->alloc.dmem.size = size; | 453 | pmu_a_ptr->alloc.dmem.size = size; |
454 | } | 454 | } |
455 | 455 | ||
456 | static void pmu_allocation_set_dmem_size_v1(struct pmu_gk20a *pmu, | 456 | static void pmu_allocation_set_dmem_size_v1(struct nvgpu_pmu *pmu, |
457 | void *pmu_alloc_ptr, u16 size) | 457 | void *pmu_alloc_ptr, u16 size) |
458 | { | 458 | { |
459 | struct pmu_allocation_v1 *pmu_a_ptr = | 459 | struct pmu_allocation_v1 *pmu_a_ptr = |
@@ -461,7 +461,7 @@ static void pmu_allocation_set_dmem_size_v1(struct pmu_gk20a *pmu, | |||
461 | pmu_a_ptr->alloc.dmem.size = size; | 461 | pmu_a_ptr->alloc.dmem.size = size; |
462 | } | 462 | } |
463 | 463 | ||
464 | static void pmu_allocation_set_dmem_size_v0(struct pmu_gk20a *pmu, | 464 | static void pmu_allocation_set_dmem_size_v0(struct nvgpu_pmu *pmu, |
465 | void *pmu_alloc_ptr, u16 size) | 465 | void *pmu_alloc_ptr, u16 size) |
466 | { | 466 | { |
467 | struct pmu_allocation_v0 *pmu_a_ptr = | 467 | struct pmu_allocation_v0 *pmu_a_ptr = |
@@ -469,7 +469,7 @@ static void pmu_allocation_set_dmem_size_v0(struct pmu_gk20a *pmu, | |||
469 | pmu_a_ptr->alloc.dmem.size = size; | 469 | pmu_a_ptr->alloc.dmem.size = size; |
470 | } | 470 | } |
471 | 471 | ||
472 | static u16 pmu_allocation_get_dmem_size_v3(struct pmu_gk20a *pmu, | 472 | static u16 pmu_allocation_get_dmem_size_v3(struct nvgpu_pmu *pmu, |
473 | void *pmu_alloc_ptr) | 473 | void *pmu_alloc_ptr) |
474 | { | 474 | { |
475 | struct pmu_allocation_v3 *pmu_a_ptr = | 475 | struct pmu_allocation_v3 *pmu_a_ptr = |
@@ -477,7 +477,7 @@ static u16 pmu_allocation_get_dmem_size_v3(struct pmu_gk20a *pmu, | |||
477 | return pmu_a_ptr->alloc.dmem.size; | 477 | return pmu_a_ptr->alloc.dmem.size; |
478 | } | 478 | } |
479 | 479 | ||
480 | static u16 pmu_allocation_get_dmem_size_v2(struct pmu_gk20a *pmu, | 480 | static u16 pmu_allocation_get_dmem_size_v2(struct nvgpu_pmu *pmu, |
481 | void *pmu_alloc_ptr) | 481 | void *pmu_alloc_ptr) |
482 | { | 482 | { |
483 | struct pmu_allocation_v2 *pmu_a_ptr = | 483 | struct pmu_allocation_v2 *pmu_a_ptr = |
@@ -485,7 +485,7 @@ static u16 pmu_allocation_get_dmem_size_v2(struct pmu_gk20a *pmu, | |||
485 | return pmu_a_ptr->alloc.dmem.size; | 485 | return pmu_a_ptr->alloc.dmem.size; |
486 | } | 486 | } |
487 | 487 | ||
488 | static u16 pmu_allocation_get_dmem_size_v1(struct pmu_gk20a *pmu, | 488 | static u16 pmu_allocation_get_dmem_size_v1(struct nvgpu_pmu *pmu, |
489 | void *pmu_alloc_ptr) | 489 | void *pmu_alloc_ptr) |
490 | { | 490 | { |
491 | struct pmu_allocation_v1 *pmu_a_ptr = | 491 | struct pmu_allocation_v1 *pmu_a_ptr = |
@@ -493,7 +493,7 @@ static u16 pmu_allocation_get_dmem_size_v1(struct pmu_gk20a *pmu, | |||
493 | return pmu_a_ptr->alloc.dmem.size; | 493 | return pmu_a_ptr->alloc.dmem.size; |
494 | } | 494 | } |
495 | 495 | ||
496 | static u16 pmu_allocation_get_dmem_size_v0(struct pmu_gk20a *pmu, | 496 | static u16 pmu_allocation_get_dmem_size_v0(struct nvgpu_pmu *pmu, |
497 | void *pmu_alloc_ptr) | 497 | void *pmu_alloc_ptr) |
498 | { | 498 | { |
499 | struct pmu_allocation_v0 *pmu_a_ptr = | 499 | struct pmu_allocation_v0 *pmu_a_ptr = |
@@ -501,7 +501,7 @@ static u16 pmu_allocation_get_dmem_size_v0(struct pmu_gk20a *pmu, | |||
501 | return pmu_a_ptr->alloc.dmem.size; | 501 | return pmu_a_ptr->alloc.dmem.size; |
502 | } | 502 | } |
503 | 503 | ||
504 | static u32 pmu_allocation_get_dmem_offset_v3(struct pmu_gk20a *pmu, | 504 | static u32 pmu_allocation_get_dmem_offset_v3(struct nvgpu_pmu *pmu, |
505 | void *pmu_alloc_ptr) | 505 | void *pmu_alloc_ptr) |
506 | { | 506 | { |
507 | struct pmu_allocation_v3 *pmu_a_ptr = | 507 | struct pmu_allocation_v3 *pmu_a_ptr = |
@@ -509,7 +509,7 @@ static u32 pmu_allocation_get_dmem_offset_v3(struct pmu_gk20a *pmu, | |||
509 | return pmu_a_ptr->alloc.dmem.offset; | 509 | return pmu_a_ptr->alloc.dmem.offset; |
510 | } | 510 | } |
511 | 511 | ||
512 | static u32 pmu_allocation_get_dmem_offset_v2(struct pmu_gk20a *pmu, | 512 | static u32 pmu_allocation_get_dmem_offset_v2(struct nvgpu_pmu *pmu, |
513 | void *pmu_alloc_ptr) | 513 | void *pmu_alloc_ptr) |
514 | { | 514 | { |
515 | struct pmu_allocation_v2 *pmu_a_ptr = | 515 | struct pmu_allocation_v2 *pmu_a_ptr = |
@@ -517,7 +517,7 @@ static u32 pmu_allocation_get_dmem_offset_v2(struct pmu_gk20a *pmu, | |||
517 | return pmu_a_ptr->alloc.dmem.offset; | 517 | return pmu_a_ptr->alloc.dmem.offset; |
518 | } | 518 | } |
519 | 519 | ||
520 | static u32 pmu_allocation_get_dmem_offset_v1(struct pmu_gk20a *pmu, | 520 | static u32 pmu_allocation_get_dmem_offset_v1(struct nvgpu_pmu *pmu, |
521 | void *pmu_alloc_ptr) | 521 | void *pmu_alloc_ptr) |
522 | { | 522 | { |
523 | struct pmu_allocation_v1 *pmu_a_ptr = | 523 | struct pmu_allocation_v1 *pmu_a_ptr = |
@@ -525,7 +525,7 @@ static u32 pmu_allocation_get_dmem_offset_v1(struct pmu_gk20a *pmu, | |||
525 | return pmu_a_ptr->alloc.dmem.offset; | 525 | return pmu_a_ptr->alloc.dmem.offset; |
526 | } | 526 | } |
527 | 527 | ||
528 | static u32 pmu_allocation_get_dmem_offset_v0(struct pmu_gk20a *pmu, | 528 | static u32 pmu_allocation_get_dmem_offset_v0(struct nvgpu_pmu *pmu, |
529 | void *pmu_alloc_ptr) | 529 | void *pmu_alloc_ptr) |
530 | { | 530 | { |
531 | struct pmu_allocation_v0 *pmu_a_ptr = | 531 | struct pmu_allocation_v0 *pmu_a_ptr = |
@@ -533,7 +533,7 @@ static u32 pmu_allocation_get_dmem_offset_v0(struct pmu_gk20a *pmu, | |||
533 | return pmu_a_ptr->alloc.dmem.offset; | 533 | return pmu_a_ptr->alloc.dmem.offset; |
534 | } | 534 | } |
535 | 535 | ||
536 | static u32 *pmu_allocation_get_dmem_offset_addr_v3(struct pmu_gk20a *pmu, | 536 | static u32 *pmu_allocation_get_dmem_offset_addr_v3(struct nvgpu_pmu *pmu, |
537 | void *pmu_alloc_ptr) | 537 | void *pmu_alloc_ptr) |
538 | { | 538 | { |
539 | struct pmu_allocation_v3 *pmu_a_ptr = | 539 | struct pmu_allocation_v3 *pmu_a_ptr = |
@@ -542,7 +542,7 @@ static u32 *pmu_allocation_get_dmem_offset_addr_v3(struct pmu_gk20a *pmu, | |||
542 | } | 542 | } |
543 | 543 | ||
544 | static void *pmu_allocation_get_fb_addr_v3( | 544 | static void *pmu_allocation_get_fb_addr_v3( |
545 | struct pmu_gk20a *pmu, void *pmu_alloc_ptr) | 545 | struct nvgpu_pmu *pmu, void *pmu_alloc_ptr) |
546 | { | 546 | { |
547 | struct pmu_allocation_v3 *pmu_a_ptr = | 547 | struct pmu_allocation_v3 *pmu_a_ptr = |
548 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; | 548 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; |
@@ -550,14 +550,14 @@ static void *pmu_allocation_get_fb_addr_v3( | |||
550 | } | 550 | } |
551 | 551 | ||
552 | static u32 pmu_allocation_get_fb_size_v3( | 552 | static u32 pmu_allocation_get_fb_size_v3( |
553 | struct pmu_gk20a *pmu, void *pmu_alloc_ptr) | 553 | struct nvgpu_pmu *pmu, void *pmu_alloc_ptr) |
554 | { | 554 | { |
555 | struct pmu_allocation_v3 *pmu_a_ptr = | 555 | struct pmu_allocation_v3 *pmu_a_ptr = |
556 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; | 556 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; |
557 | return sizeof(pmu_a_ptr->alloc.fb); | 557 | return sizeof(pmu_a_ptr->alloc.fb); |
558 | } | 558 | } |
559 | 559 | ||
560 | static u32 *pmu_allocation_get_dmem_offset_addr_v2(struct pmu_gk20a *pmu, | 560 | static u32 *pmu_allocation_get_dmem_offset_addr_v2(struct nvgpu_pmu *pmu, |
561 | void *pmu_alloc_ptr) | 561 | void *pmu_alloc_ptr) |
562 | { | 562 | { |
563 | struct pmu_allocation_v2 *pmu_a_ptr = | 563 | struct pmu_allocation_v2 *pmu_a_ptr = |
@@ -565,7 +565,7 @@ static u32 *pmu_allocation_get_dmem_offset_addr_v2(struct pmu_gk20a *pmu, | |||
565 | return &pmu_a_ptr->alloc.dmem.offset; | 565 | return &pmu_a_ptr->alloc.dmem.offset; |
566 | } | 566 | } |
567 | 567 | ||
568 | static u32 *pmu_allocation_get_dmem_offset_addr_v1(struct pmu_gk20a *pmu, | 568 | static u32 *pmu_allocation_get_dmem_offset_addr_v1(struct nvgpu_pmu *pmu, |
569 | void *pmu_alloc_ptr) | 569 | void *pmu_alloc_ptr) |
570 | { | 570 | { |
571 | struct pmu_allocation_v1 *pmu_a_ptr = | 571 | struct pmu_allocation_v1 *pmu_a_ptr = |
@@ -573,7 +573,7 @@ static u32 *pmu_allocation_get_dmem_offset_addr_v1(struct pmu_gk20a *pmu, | |||
573 | return &pmu_a_ptr->alloc.dmem.offset; | 573 | return &pmu_a_ptr->alloc.dmem.offset; |
574 | } | 574 | } |
575 | 575 | ||
576 | static u32 *pmu_allocation_get_dmem_offset_addr_v0(struct pmu_gk20a *pmu, | 576 | static u32 *pmu_allocation_get_dmem_offset_addr_v0(struct nvgpu_pmu *pmu, |
577 | void *pmu_alloc_ptr) | 577 | void *pmu_alloc_ptr) |
578 | { | 578 | { |
579 | struct pmu_allocation_v0 *pmu_a_ptr = | 579 | struct pmu_allocation_v0 *pmu_a_ptr = |
@@ -581,7 +581,7 @@ static u32 *pmu_allocation_get_dmem_offset_addr_v0(struct pmu_gk20a *pmu, | |||
581 | return &pmu_a_ptr->alloc.dmem.offset; | 581 | return &pmu_a_ptr->alloc.dmem.offset; |
582 | } | 582 | } |
583 | 583 | ||
584 | static void pmu_allocation_set_dmem_offset_v3(struct pmu_gk20a *pmu, | 584 | static void pmu_allocation_set_dmem_offset_v3(struct nvgpu_pmu *pmu, |
585 | void *pmu_alloc_ptr, u32 offset) | 585 | void *pmu_alloc_ptr, u32 offset) |
586 | { | 586 | { |
587 | struct pmu_allocation_v3 *pmu_a_ptr = | 587 | struct pmu_allocation_v3 *pmu_a_ptr = |
@@ -589,7 +589,7 @@ static void pmu_allocation_set_dmem_offset_v3(struct pmu_gk20a *pmu, | |||
589 | pmu_a_ptr->alloc.dmem.offset = offset; | 589 | pmu_a_ptr->alloc.dmem.offset = offset; |
590 | } | 590 | } |
591 | 591 | ||
592 | static void pmu_allocation_set_dmem_offset_v2(struct pmu_gk20a *pmu, | 592 | static void pmu_allocation_set_dmem_offset_v2(struct nvgpu_pmu *pmu, |
593 | void *pmu_alloc_ptr, u32 offset) | 593 | void *pmu_alloc_ptr, u32 offset) |
594 | { | 594 | { |
595 | struct pmu_allocation_v2 *pmu_a_ptr = | 595 | struct pmu_allocation_v2 *pmu_a_ptr = |
@@ -597,7 +597,7 @@ static void pmu_allocation_set_dmem_offset_v2(struct pmu_gk20a *pmu, | |||
597 | pmu_a_ptr->alloc.dmem.offset = offset; | 597 | pmu_a_ptr->alloc.dmem.offset = offset; |
598 | } | 598 | } |
599 | 599 | ||
600 | static void pmu_allocation_set_dmem_offset_v1(struct pmu_gk20a *pmu, | 600 | static void pmu_allocation_set_dmem_offset_v1(struct nvgpu_pmu *pmu, |
601 | void *pmu_alloc_ptr, u32 offset) | 601 | void *pmu_alloc_ptr, u32 offset) |
602 | { | 602 | { |
603 | struct pmu_allocation_v1 *pmu_a_ptr = | 603 | struct pmu_allocation_v1 *pmu_a_ptr = |
@@ -605,7 +605,7 @@ static void pmu_allocation_set_dmem_offset_v1(struct pmu_gk20a *pmu, | |||
605 | pmu_a_ptr->alloc.dmem.offset = offset; | 605 | pmu_a_ptr->alloc.dmem.offset = offset; |
606 | } | 606 | } |
607 | 607 | ||
608 | static void pmu_allocation_set_dmem_offset_v0(struct pmu_gk20a *pmu, | 608 | static void pmu_allocation_set_dmem_offset_v0(struct nvgpu_pmu *pmu, |
609 | void *pmu_alloc_ptr, u32 offset) | 609 | void *pmu_alloc_ptr, u32 offset) |
610 | { | 610 | { |
611 | struct pmu_allocation_v0 *pmu_a_ptr = | 611 | struct pmu_allocation_v0 *pmu_a_ptr = |
@@ -1421,7 +1421,7 @@ static void pg_cmd_eng_buf_load_set_dma_idx_v2(struct pmu_pg_cmd *pg, | |||
1421 | pg->eng_buf_load_v2.dma_desc.params |= (value << 24); | 1421 | pg->eng_buf_load_v2.dma_desc.params |= (value << 24); |
1422 | } | 1422 | } |
1423 | 1423 | ||
1424 | int gk20a_init_pmu(struct pmu_gk20a *pmu) | 1424 | int gk20a_init_pmu(struct nvgpu_pmu *pmu) |
1425 | { | 1425 | { |
1426 | struct gk20a *g = gk20a_from_pmu(pmu); | 1426 | struct gk20a *g = gk20a_from_pmu(pmu); |
1427 | struct pmu_v *pv = &g->ops.pmu_ver; | 1427 | struct pmu_v *pv = &g->ops.pmu_ver; |
@@ -2214,7 +2214,7 @@ fail_elpg: | |||
2214 | return err; | 2214 | return err; |
2215 | } | 2215 | } |
2216 | 2216 | ||
2217 | void pmu_copy_from_dmem(struct pmu_gk20a *pmu, | 2217 | void pmu_copy_from_dmem(struct nvgpu_pmu *pmu, |
2218 | u32 src, u8 *dst, u32 size, u8 port) | 2218 | u32 src, u8 *dst, u32 size, u8 port) |
2219 | { | 2219 | { |
2220 | struct gk20a *g = gk20a_from_pmu(pmu); | 2220 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -2258,7 +2258,7 @@ void pmu_copy_from_dmem(struct pmu_gk20a *pmu, | |||
2258 | return; | 2258 | return; |
2259 | } | 2259 | } |
2260 | 2260 | ||
2261 | void pmu_copy_to_dmem(struct pmu_gk20a *pmu, | 2261 | void pmu_copy_to_dmem(struct nvgpu_pmu *pmu, |
2262 | u32 dst, u8 *src, u32 size, u8 port) | 2262 | u32 dst, u8 *src, u32 size, u8 port) |
2263 | { | 2263 | { |
2264 | struct gk20a *g = gk20a_from_pmu(pmu); | 2264 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -2309,7 +2309,7 @@ void pmu_copy_to_dmem(struct pmu_gk20a *pmu, | |||
2309 | return; | 2309 | return; |
2310 | } | 2310 | } |
2311 | 2311 | ||
2312 | int pmu_idle(struct pmu_gk20a *pmu) | 2312 | int pmu_idle(struct nvgpu_pmu *pmu) |
2313 | { | 2313 | { |
2314 | struct gk20a *g = gk20a_from_pmu(pmu); | 2314 | struct gk20a *g = gk20a_from_pmu(pmu); |
2315 | struct nvgpu_timeout timeout; | 2315 | struct nvgpu_timeout timeout; |
@@ -2338,7 +2338,7 @@ int pmu_idle(struct pmu_gk20a *pmu) | |||
2338 | return 0; | 2338 | return 0; |
2339 | } | 2339 | } |
2340 | 2340 | ||
2341 | void pmu_enable_irq(struct pmu_gk20a *pmu, bool enable) | 2341 | void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable) |
2342 | { | 2342 | { |
2343 | struct gk20a *g = gk20a_from_pmu(pmu); | 2343 | struct gk20a *g = gk20a_from_pmu(pmu); |
2344 | 2344 | ||
@@ -2400,7 +2400,7 @@ void pmu_enable_irq(struct pmu_gk20a *pmu, bool enable) | |||
2400 | gk20a_dbg_fn("done"); | 2400 | gk20a_dbg_fn("done"); |
2401 | } | 2401 | } |
2402 | 2402 | ||
2403 | int pmu_enable_hw(struct pmu_gk20a *pmu, bool enable) | 2403 | int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable) |
2404 | { | 2404 | { |
2405 | struct gk20a *g = gk20a_from_pmu(pmu); | 2405 | struct gk20a *g = gk20a_from_pmu(pmu); |
2406 | struct nvgpu_timeout timeout; | 2406 | struct nvgpu_timeout timeout; |
@@ -2443,7 +2443,7 @@ int pmu_enable_hw(struct pmu_gk20a *pmu, bool enable) | |||
2443 | } | 2443 | } |
2444 | } | 2444 | } |
2445 | 2445 | ||
2446 | static int pmu_enable(struct pmu_gk20a *pmu, bool enable) | 2446 | static int pmu_enable(struct nvgpu_pmu *pmu, bool enable) |
2447 | { | 2447 | { |
2448 | struct gk20a *g = gk20a_from_pmu(pmu); | 2448 | struct gk20a *g = gk20a_from_pmu(pmu); |
2449 | u32 pmc_enable; | 2449 | u32 pmc_enable; |
@@ -2477,7 +2477,7 @@ static int pmu_enable(struct pmu_gk20a *pmu, bool enable) | |||
2477 | return 0; | 2477 | return 0; |
2478 | } | 2478 | } |
2479 | 2479 | ||
2480 | int pmu_reset(struct pmu_gk20a *pmu) | 2480 | int pmu_reset(struct nvgpu_pmu *pmu) |
2481 | { | 2481 | { |
2482 | int err; | 2482 | int err; |
2483 | 2483 | ||
@@ -2502,7 +2502,7 @@ int pmu_reset(struct pmu_gk20a *pmu) | |||
2502 | return 0; | 2502 | return 0; |
2503 | } | 2503 | } |
2504 | 2504 | ||
2505 | int pmu_bootstrap(struct pmu_gk20a *pmu) | 2505 | int pmu_bootstrap(struct nvgpu_pmu *pmu) |
2506 | { | 2506 | { |
2507 | struct gk20a *g = gk20a_from_pmu(pmu); | 2507 | struct gk20a *g = gk20a_from_pmu(pmu); |
2508 | struct mm_gk20a *mm = &g->mm; | 2508 | struct mm_gk20a *mm = &g->mm; |
@@ -2593,7 +2593,7 @@ int pmu_bootstrap(struct pmu_gk20a *pmu) | |||
2593 | return 0; | 2593 | return 0; |
2594 | } | 2594 | } |
2595 | 2595 | ||
2596 | void pmu_seq_init(struct pmu_gk20a *pmu) | 2596 | void pmu_seq_init(struct nvgpu_pmu *pmu) |
2597 | { | 2597 | { |
2598 | u32 i; | 2598 | u32 i; |
2599 | 2599 | ||
@@ -2606,7 +2606,7 @@ void pmu_seq_init(struct pmu_gk20a *pmu) | |||
2606 | pmu->seq[i].id = i; | 2606 | pmu->seq[i].id = i; |
2607 | } | 2607 | } |
2608 | 2608 | ||
2609 | static int pmu_seq_acquire(struct pmu_gk20a *pmu, | 2609 | static int pmu_seq_acquire(struct nvgpu_pmu *pmu, |
2610 | struct pmu_sequence **pseq) | 2610 | struct pmu_sequence **pseq) |
2611 | { | 2611 | { |
2612 | struct gk20a *g = gk20a_from_pmu(pmu); | 2612 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -2631,7 +2631,7 @@ static int pmu_seq_acquire(struct pmu_gk20a *pmu, | |||
2631 | return 0; | 2631 | return 0; |
2632 | } | 2632 | } |
2633 | 2633 | ||
2634 | static void pmu_seq_release(struct pmu_gk20a *pmu, | 2634 | static void pmu_seq_release(struct nvgpu_pmu *pmu, |
2635 | struct pmu_sequence *seq) | 2635 | struct pmu_sequence *seq) |
2636 | { | 2636 | { |
2637 | struct gk20a *g = gk20a_from_pmu(pmu); | 2637 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -2649,7 +2649,7 @@ static void pmu_seq_release(struct pmu_gk20a *pmu, | |||
2649 | clear_bit(seq->id, pmu->pmu_seq_tbl); | 2649 | clear_bit(seq->id, pmu->pmu_seq_tbl); |
2650 | } | 2650 | } |
2651 | 2651 | ||
2652 | static int pmu_queue_init(struct pmu_gk20a *pmu, | 2652 | static int pmu_queue_init(struct nvgpu_pmu *pmu, |
2653 | u32 id, union pmu_init_msg_pmu *init) | 2653 | u32 id, union pmu_init_msg_pmu *init) |
2654 | { | 2654 | { |
2655 | struct gk20a *g = gk20a_from_pmu(pmu); | 2655 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -2670,7 +2670,7 @@ static int pmu_queue_init(struct pmu_gk20a *pmu, | |||
2670 | return 0; | 2670 | return 0; |
2671 | } | 2671 | } |
2672 | 2672 | ||
2673 | static int pmu_queue_head(struct pmu_gk20a *pmu, struct pmu_queue *queue, | 2673 | static int pmu_queue_head(struct nvgpu_pmu *pmu, struct pmu_queue *queue, |
2674 | u32 *head, bool set) | 2674 | u32 *head, bool set) |
2675 | { | 2675 | { |
2676 | struct gk20a *g = gk20a_from_pmu(pmu); | 2676 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -2707,7 +2707,7 @@ static int pmu_queue_head(struct pmu_gk20a *pmu, struct pmu_queue *queue, | |||
2707 | return 0; | 2707 | return 0; |
2708 | } | 2708 | } |
2709 | 2709 | ||
2710 | static int pmu_queue_tail(struct pmu_gk20a *pmu, struct pmu_queue *queue, | 2710 | static int pmu_queue_tail(struct nvgpu_pmu *pmu, struct pmu_queue *queue, |
2711 | u32 *tail, bool set) | 2711 | u32 *tail, bool set) |
2712 | { | 2712 | { |
2713 | struct gk20a *g = gk20a_from_pmu(pmu); | 2713 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -2745,19 +2745,19 @@ static int pmu_queue_tail(struct pmu_gk20a *pmu, struct pmu_queue *queue, | |||
2745 | return 0; | 2745 | return 0; |
2746 | } | 2746 | } |
2747 | 2747 | ||
2748 | static inline void pmu_queue_read(struct pmu_gk20a *pmu, | 2748 | static inline void pmu_queue_read(struct nvgpu_pmu *pmu, |
2749 | u32 offset, u8 *dst, u32 size) | 2749 | u32 offset, u8 *dst, u32 size) |
2750 | { | 2750 | { |
2751 | pmu_copy_from_dmem(pmu, offset, dst, size, 0); | 2751 | pmu_copy_from_dmem(pmu, offset, dst, size, 0); |
2752 | } | 2752 | } |
2753 | 2753 | ||
2754 | static inline void pmu_queue_write(struct pmu_gk20a *pmu, | 2754 | static inline void pmu_queue_write(struct nvgpu_pmu *pmu, |
2755 | u32 offset, u8 *src, u32 size) | 2755 | u32 offset, u8 *src, u32 size) |
2756 | { | 2756 | { |
2757 | pmu_copy_to_dmem(pmu, offset, src, size, 0); | 2757 | pmu_copy_to_dmem(pmu, offset, src, size, 0); |
2758 | } | 2758 | } |
2759 | 2759 | ||
2760 | int pmu_mutex_acquire(struct pmu_gk20a *pmu, u32 id, u32 *token) | 2760 | int pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token) |
2761 | { | 2761 | { |
2762 | struct gk20a *g = gk20a_from_pmu(pmu); | 2762 | struct gk20a *g = gk20a_from_pmu(pmu); |
2763 | struct pmu_mutex *mutex; | 2763 | struct pmu_mutex *mutex; |
@@ -2826,7 +2826,7 @@ int pmu_mutex_acquire(struct pmu_gk20a *pmu, u32 id, u32 *token) | |||
2826 | return -EBUSY; | 2826 | return -EBUSY; |
2827 | } | 2827 | } |
2828 | 2828 | ||
2829 | int pmu_mutex_release(struct pmu_gk20a *pmu, u32 id, u32 *token) | 2829 | int pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token) |
2830 | { | 2830 | { |
2831 | struct gk20a *g = gk20a_from_pmu(pmu); | 2831 | struct gk20a *g = gk20a_from_pmu(pmu); |
2832 | struct pmu_mutex *mutex; | 2832 | struct pmu_mutex *mutex; |
@@ -2867,7 +2867,7 @@ int pmu_mutex_release(struct pmu_gk20a *pmu, u32 id, u32 *token) | |||
2867 | return 0; | 2867 | return 0; |
2868 | } | 2868 | } |
2869 | 2869 | ||
2870 | static int pmu_queue_lock(struct pmu_gk20a *pmu, | 2870 | static int pmu_queue_lock(struct nvgpu_pmu *pmu, |
2871 | struct pmu_queue *queue) | 2871 | struct pmu_queue *queue) |
2872 | { | 2872 | { |
2873 | int err; | 2873 | int err; |
@@ -2884,7 +2884,7 @@ static int pmu_queue_lock(struct pmu_gk20a *pmu, | |||
2884 | return err; | 2884 | return err; |
2885 | } | 2885 | } |
2886 | 2886 | ||
2887 | static int pmu_queue_unlock(struct pmu_gk20a *pmu, | 2887 | static int pmu_queue_unlock(struct nvgpu_pmu *pmu, |
2888 | struct pmu_queue *queue) | 2888 | struct pmu_queue *queue) |
2889 | { | 2889 | { |
2890 | int err; | 2890 | int err; |
@@ -2902,7 +2902,7 @@ static int pmu_queue_unlock(struct pmu_gk20a *pmu, | |||
2902 | } | 2902 | } |
2903 | 2903 | ||
2904 | /* called by pmu_read_message, no lock */ | 2904 | /* called by pmu_read_message, no lock */ |
2905 | static bool pmu_queue_is_empty(struct pmu_gk20a *pmu, | 2905 | static bool pmu_queue_is_empty(struct nvgpu_pmu *pmu, |
2906 | struct pmu_queue *queue) | 2906 | struct pmu_queue *queue) |
2907 | { | 2907 | { |
2908 | u32 head, tail; | 2908 | u32 head, tail; |
@@ -2916,7 +2916,7 @@ static bool pmu_queue_is_empty(struct pmu_gk20a *pmu, | |||
2916 | return head == tail; | 2916 | return head == tail; |
2917 | } | 2917 | } |
2918 | 2918 | ||
2919 | static bool pmu_queue_has_room(struct pmu_gk20a *pmu, | 2919 | static bool pmu_queue_has_room(struct nvgpu_pmu *pmu, |
2920 | struct pmu_queue *queue, u32 size, bool *need_rewind) | 2920 | struct pmu_queue *queue, u32 size, bool *need_rewind) |
2921 | { | 2921 | { |
2922 | u32 head, tail; | 2922 | u32 head, tail; |
@@ -2946,7 +2946,7 @@ static bool pmu_queue_has_room(struct pmu_gk20a *pmu, | |||
2946 | return size <= free; | 2946 | return size <= free; |
2947 | } | 2947 | } |
2948 | 2948 | ||
2949 | static int pmu_queue_push(struct pmu_gk20a *pmu, | 2949 | static int pmu_queue_push(struct nvgpu_pmu *pmu, |
2950 | struct pmu_queue *queue, void *data, u32 size) | 2950 | struct pmu_queue *queue, void *data, u32 size) |
2951 | { | 2951 | { |
2952 | 2952 | ||
@@ -2962,7 +2962,7 @@ static int pmu_queue_push(struct pmu_gk20a *pmu, | |||
2962 | return 0; | 2962 | return 0; |
2963 | } | 2963 | } |
2964 | 2964 | ||
2965 | static int pmu_queue_pop(struct pmu_gk20a *pmu, | 2965 | static int pmu_queue_pop(struct nvgpu_pmu *pmu, |
2966 | struct pmu_queue *queue, void *data, u32 size, | 2966 | struct pmu_queue *queue, void *data, u32 size, |
2967 | u32 *bytes_read) | 2967 | u32 *bytes_read) |
2968 | { | 2968 | { |
@@ -2998,7 +2998,7 @@ static int pmu_queue_pop(struct pmu_gk20a *pmu, | |||
2998 | return 0; | 2998 | return 0; |
2999 | } | 2999 | } |
3000 | 3000 | ||
3001 | static void pmu_queue_rewind(struct pmu_gk20a *pmu, | 3001 | static void pmu_queue_rewind(struct nvgpu_pmu *pmu, |
3002 | struct pmu_queue *queue) | 3002 | struct pmu_queue *queue) |
3003 | { | 3003 | { |
3004 | struct pmu_cmd cmd; | 3004 | struct pmu_cmd cmd; |
@@ -3022,7 +3022,7 @@ static void pmu_queue_rewind(struct pmu_gk20a *pmu, | |||
3022 | } | 3022 | } |
3023 | 3023 | ||
3024 | /* open for read and lock the queue */ | 3024 | /* open for read and lock the queue */ |
3025 | static int pmu_queue_open_read(struct pmu_gk20a *pmu, | 3025 | static int pmu_queue_open_read(struct nvgpu_pmu *pmu, |
3026 | struct pmu_queue *queue) | 3026 | struct pmu_queue *queue) |
3027 | { | 3027 | { |
3028 | int err; | 3028 | int err; |
@@ -3043,7 +3043,7 @@ static int pmu_queue_open_read(struct pmu_gk20a *pmu, | |||
3043 | 3043 | ||
3044 | /* open for write and lock the queue | 3044 | /* open for write and lock the queue |
3045 | make sure there's enough free space for the write */ | 3045 | make sure there's enough free space for the write */ |
3046 | static int pmu_queue_open_write(struct pmu_gk20a *pmu, | 3046 | static int pmu_queue_open_write(struct nvgpu_pmu *pmu, |
3047 | struct pmu_queue *queue, u32 size) | 3047 | struct pmu_queue *queue, u32 size) |
3048 | { | 3048 | { |
3049 | bool rewind = false; | 3049 | bool rewind = false; |
@@ -3074,7 +3074,7 @@ static int pmu_queue_open_write(struct pmu_gk20a *pmu, | |||
3074 | } | 3074 | } |
3075 | 3075 | ||
3076 | /* close and unlock the queue */ | 3076 | /* close and unlock the queue */ |
3077 | static int pmu_queue_close(struct pmu_gk20a *pmu, | 3077 | static int pmu_queue_close(struct nvgpu_pmu *pmu, |
3078 | struct pmu_queue *queue, bool commit) | 3078 | struct pmu_queue *queue, bool commit) |
3079 | { | 3079 | { |
3080 | if (!queue->opened) | 3080 | if (!queue->opened) |
@@ -3098,7 +3098,7 @@ static int pmu_queue_close(struct pmu_gk20a *pmu, | |||
3098 | return 0; | 3098 | return 0; |
3099 | } | 3099 | } |
3100 | 3100 | ||
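The three helpers above compose into the driver's queue write path: a caller opens the queue for write (which locks it and checks for space), pushes the command bytes, and closes the queue to commit or discard the new write position. The function below is an illustrative sketch only, not part of this patch; it assumes the command's total length is carried in cmd->hdr.size and omits the retry and timeout handling done by the real pmu_write_cmd().

/* Illustrative sketch only -- not part of this patch. */
static int example_queue_write(struct nvgpu_pmu *pmu,
	struct pmu_queue *queue, struct pmu_cmd *cmd)
{
	int err;

	/* lock the queue and make sure cmd->hdr.size bytes fit (may rewind) */
	err = pmu_queue_open_write(pmu, queue, cmd->hdr.size);
	if (err)
		return err;

	/* copy the command into DMEM at the current write position */
	err = pmu_queue_push(pmu, queue, cmd, cmd->hdr.size);

	/* commit the new write pointer on success, discard it on failure */
	pmu_queue_close(pmu, queue, err == 0);

	return err;
}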
3101 | void gk20a_remove_pmu_support(struct pmu_gk20a *pmu) | 3101 | void gk20a_remove_pmu_support(struct nvgpu_pmu *pmu) |
3102 | { | 3102 | { |
3103 | struct gk20a *g = gk20a_from_pmu(pmu); | 3103 | struct gk20a *g = gk20a_from_pmu(pmu); |
3104 | 3104 | ||
@@ -3118,7 +3118,7 @@ void gk20a_remove_pmu_support(struct pmu_gk20a *pmu) | |||
3118 | 3118 | ||
3119 | static int gk20a_init_pmu_reset_enable_hw(struct gk20a *g) | 3119 | static int gk20a_init_pmu_reset_enable_hw(struct gk20a *g) |
3120 | { | 3120 | { |
3121 | struct pmu_gk20a *pmu = &g->pmu; | 3121 | struct nvgpu_pmu *pmu = &g->pmu; |
3122 | 3122 | ||
3123 | gk20a_dbg_fn(""); | 3123 | gk20a_dbg_fn(""); |
3124 | 3124 | ||
@@ -3129,7 +3129,7 @@ static int gk20a_init_pmu_reset_enable_hw(struct gk20a *g) | |||
3129 | 3129 | ||
3130 | static int gk20a_prepare_ucode(struct gk20a *g) | 3130 | static int gk20a_prepare_ucode(struct gk20a *g) |
3131 | { | 3131 | { |
3132 | struct pmu_gk20a *pmu = &g->pmu; | 3132 | struct nvgpu_pmu *pmu = &g->pmu; |
3133 | int err = 0; | 3133 | int err = 0; |
3134 | struct mm_gk20a *mm = &g->mm; | 3134 | struct mm_gk20a *mm = &g->mm; |
3135 | struct vm_gk20a *vm = &mm->pmu.vm; | 3135 | struct vm_gk20a *vm = &mm->pmu.vm; |
@@ -3168,7 +3168,7 @@ static int gk20a_prepare_ucode(struct gk20a *g) | |||
3168 | 3168 | ||
3169 | static int gk20a_init_pmu_setup_sw(struct gk20a *g) | 3169 | static int gk20a_init_pmu_setup_sw(struct gk20a *g) |
3170 | { | 3170 | { |
3171 | struct pmu_gk20a *pmu = &g->pmu; | 3171 | struct nvgpu_pmu *pmu = &g->pmu; |
3172 | struct mm_gk20a *mm = &g->mm; | 3172 | struct mm_gk20a *mm = &g->mm; |
3173 | struct vm_gk20a *vm = &mm->pmu.vm; | 3173 | struct vm_gk20a *vm = &mm->pmu.vm; |
3174 | unsigned int i; | 3174 | unsigned int i; |
@@ -3266,7 +3266,7 @@ skip_init: | |||
3266 | static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg, | 3266 | static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg, |
3267 | void *param, u32 handle, u32 status) | 3267 | void *param, u32 handle, u32 status) |
3268 | { | 3268 | { |
3269 | struct pmu_gk20a *pmu = param; | 3269 | struct nvgpu_pmu *pmu = param; |
3270 | struct pmu_pg_msg_eng_buf_stat *eng_buf_stat = &msg->msg.pg.eng_buf_stat; | 3270 | struct pmu_pg_msg_eng_buf_stat *eng_buf_stat = &msg->msg.pg.eng_buf_stat; |
3271 | 3271 | ||
3272 | gk20a_dbg_fn(""); | 3272 | gk20a_dbg_fn(""); |
@@ -3289,7 +3289,7 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg, | |||
3289 | 3289 | ||
3290 | static int gk20a_init_pmu_setup_hw1(struct gk20a *g) | 3290 | static int gk20a_init_pmu_setup_hw1(struct gk20a *g) |
3291 | { | 3291 | { |
3292 | struct pmu_gk20a *pmu = &g->pmu; | 3292 | struct nvgpu_pmu *pmu = &g->pmu; |
3293 | int err = 0; | 3293 | int err = 0; |
3294 | 3294 | ||
3295 | gk20a_dbg_fn(""); | 3295 | gk20a_dbg_fn(""); |
@@ -3327,7 +3327,7 @@ static void pmu_setup_hw_enable_elpg(struct gk20a *g); | |||
3327 | static void nvgpu_pmu_state_change(struct gk20a *g, u32 pmu_state, | 3327 | static void nvgpu_pmu_state_change(struct gk20a *g, u32 pmu_state, |
3328 | bool post_change_event) | 3328 | bool post_change_event) |
3329 | { | 3329 | { |
3330 | struct pmu_gk20a *pmu = &g->pmu; | 3330 | struct nvgpu_pmu *pmu = &g->pmu; |
3331 | 3331 | ||
3332 | pmu->pmu_state = pmu_state; | 3332 | pmu->pmu_state = pmu_state; |
3333 | 3333 | ||
@@ -3343,7 +3343,7 @@ static void nvgpu_pmu_state_change(struct gk20a *g, u32 pmu_state, | |||
3343 | static int nvgpu_pg_init_task(void *arg) | 3343 | static int nvgpu_pg_init_task(void *arg) |
3344 | { | 3344 | { |
3345 | struct gk20a *g = (struct gk20a *)arg; | 3345 | struct gk20a *g = (struct gk20a *)arg; |
3346 | struct pmu_gk20a *pmu = &g->pmu; | 3346 | struct nvgpu_pmu *pmu = &g->pmu; |
3347 | struct nvgpu_pg_init *pg_init = &pmu->pg_init; | 3347 | struct nvgpu_pg_init *pg_init = &pmu->pg_init; |
3348 | u32 pmu_state = 0; | 3348 | u32 pmu_state = 0; |
3349 | 3349 | ||
@@ -3396,7 +3396,7 @@ static int nvgpu_pg_init_task(void *arg) | |||
3396 | 3396 | ||
3397 | static int nvgpu_init_task_pg_init(struct gk20a *g) | 3397 | static int nvgpu_init_task_pg_init(struct gk20a *g) |
3398 | { | 3398 | { |
3399 | struct pmu_gk20a *pmu = &g->pmu; | 3399 | struct nvgpu_pmu *pmu = &g->pmu; |
3400 | char thread_name[64]; | 3400 | char thread_name[64]; |
3401 | int err = 0; | 3401 | int err = 0; |
3402 | 3402 | ||
@@ -3415,7 +3415,7 @@ static int nvgpu_init_task_pg_init(struct gk20a *g) | |||
3415 | 3415 | ||
3416 | int gk20a_init_pmu_bind_fecs(struct gk20a *g) | 3416 | int gk20a_init_pmu_bind_fecs(struct gk20a *g) |
3417 | { | 3417 | { |
3418 | struct pmu_gk20a *pmu = &g->pmu; | 3418 | struct nvgpu_pmu *pmu = &g->pmu; |
3419 | struct pmu_cmd cmd; | 3419 | struct pmu_cmd cmd; |
3420 | u32 desc; | 3420 | u32 desc; |
3421 | int err = 0; | 3421 | int err = 0; |
@@ -3454,7 +3454,7 @@ int gk20a_init_pmu_bind_fecs(struct gk20a *g) | |||
3454 | 3454 | ||
3455 | static void pmu_setup_hw_load_zbc(struct gk20a *g) | 3455 | static void pmu_setup_hw_load_zbc(struct gk20a *g) |
3456 | { | 3456 | { |
3457 | struct pmu_gk20a *pmu = &g->pmu; | 3457 | struct nvgpu_pmu *pmu = &g->pmu; |
3458 | struct pmu_cmd cmd; | 3458 | struct pmu_cmd cmd; |
3459 | u32 desc; | 3459 | u32 desc; |
3460 | u32 gr_engine_id; | 3460 | u32 gr_engine_id; |
@@ -3489,7 +3489,7 @@ static void pmu_setup_hw_load_zbc(struct gk20a *g) | |||
3489 | 3489 | ||
3490 | static void pmu_setup_hw_enable_elpg(struct gk20a *g) | 3490 | static void pmu_setup_hw_enable_elpg(struct gk20a *g) |
3491 | { | 3491 | { |
3492 | struct pmu_gk20a *pmu = &g->pmu; | 3492 | struct nvgpu_pmu *pmu = &g->pmu; |
3493 | 3493 | ||
3494 | /* | 3494 | /* |
3495 | * FIXME: To enable ELPG, we increase the PMU ext2priv timeout unit to | 3495 | * FIXME: To enable ELPG, we increase the PMU ext2priv timeout unit to |
@@ -3532,7 +3532,7 @@ static void gk20a_write_dmatrfbase(struct gk20a *g, u32 addr) | |||
3532 | int gk20a_pmu_reset(struct gk20a *g) | 3532 | int gk20a_pmu_reset(struct gk20a *g) |
3533 | { | 3533 | { |
3534 | int err; | 3534 | int err; |
3535 | struct pmu_gk20a *pmu = &g->pmu; | 3535 | struct nvgpu_pmu *pmu = &g->pmu; |
3536 | 3536 | ||
3537 | err = pmu_reset(pmu); | 3537 | err = pmu_reset(pmu); |
3538 | 3538 | ||
@@ -3592,7 +3592,7 @@ void gk20a_init_pmu_ops(struct gpu_ops *gops) | |||
3592 | 3592 | ||
3593 | int gk20a_init_pmu_support(struct gk20a *g) | 3593 | int gk20a_init_pmu_support(struct gk20a *g) |
3594 | { | 3594 | { |
3595 | struct pmu_gk20a *pmu = &g->pmu; | 3595 | struct nvgpu_pmu *pmu = &g->pmu; |
3596 | u32 err; | 3596 | u32 err; |
3597 | 3597 | ||
3598 | gk20a_dbg_fn(""); | 3598 | gk20a_dbg_fn(""); |
@@ -3621,7 +3621,7 @@ int gk20a_init_pmu_support(struct gk20a *g) | |||
3621 | static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg, | 3621 | static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg, |
3622 | void *param, u32 handle, u32 status) | 3622 | void *param, u32 handle, u32 status) |
3623 | { | 3623 | { |
3624 | struct pmu_gk20a *pmu = param; | 3624 | struct nvgpu_pmu *pmu = param; |
3625 | struct pmu_pg_msg_elpg_msg *elpg_msg = &msg->msg.pg.elpg_msg; | 3625 | struct pmu_pg_msg_elpg_msg *elpg_msg = &msg->msg.pg.elpg_msg; |
3626 | 3626 | ||
3627 | gk20a_dbg_fn(""); | 3627 | gk20a_dbg_fn(""); |
@@ -3681,7 +3681,7 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg, | |||
3681 | static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg, | 3681 | static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg, |
3682 | void *param, u32 handle, u32 status) | 3682 | void *param, u32 handle, u32 status) |
3683 | { | 3683 | { |
3684 | struct pmu_gk20a *pmu = param; | 3684 | struct nvgpu_pmu *pmu = param; |
3685 | 3685 | ||
3686 | gk20a_dbg_fn(""); | 3686 | gk20a_dbg_fn(""); |
3687 | 3687 | ||
@@ -3704,7 +3704,7 @@ static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg, | |||
3704 | 3704 | ||
3705 | static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id) | 3705 | static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id) |
3706 | { | 3706 | { |
3707 | struct pmu_gk20a *pmu = &g->pmu; | 3707 | struct nvgpu_pmu *pmu = &g->pmu; |
3708 | struct pmu_cmd cmd; | 3708 | struct pmu_cmd cmd; |
3709 | u32 seq; | 3709 | u32 seq; |
3710 | 3710 | ||
@@ -3766,7 +3766,7 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id) | |||
3766 | } | 3766 | } |
3767 | static int pmu_init_powergating(struct gk20a *g) | 3767 | static int pmu_init_powergating(struct gk20a *g) |
3768 | { | 3768 | { |
3769 | struct pmu_gk20a *pmu = &g->pmu; | 3769 | struct nvgpu_pmu *pmu = &g->pmu; |
3770 | u32 pg_engine_id; | 3770 | u32 pg_engine_id; |
3771 | u32 pg_engine_id_list = 0; | 3771 | u32 pg_engine_id_list = 0; |
3772 | 3772 | ||
@@ -3795,7 +3795,7 @@ static int pmu_init_powergating(struct gk20a *g) | |||
3795 | return 0; | 3795 | return 0; |
3796 | } | 3796 | } |
3797 | 3797 | ||
3798 | static u8 get_perfmon_id(struct pmu_gk20a *pmu) | 3798 | static u8 get_perfmon_id(struct nvgpu_pmu *pmu) |
3799 | { | 3799 | { |
3800 | struct gk20a *g = gk20a_from_pmu(pmu); | 3800 | struct gk20a *g = gk20a_from_pmu(pmu); |
3801 | u32 ver = g->gpu_characteristics.arch + g->gpu_characteristics.impl; | 3801 | u32 ver = g->gpu_characteristics.arch + g->gpu_characteristics.impl; |
@@ -3824,7 +3824,7 @@ static u8 get_perfmon_id(struct pmu_gk20a *pmu) | |||
3824 | return unit_id; | 3824 | return unit_id; |
3825 | } | 3825 | } |
3826 | 3826 | ||
3827 | static int pmu_init_perfmon(struct pmu_gk20a *pmu) | 3827 | static int pmu_init_perfmon(struct nvgpu_pmu *pmu) |
3828 | { | 3828 | { |
3829 | struct gk20a *g = gk20a_from_pmu(pmu); | 3829 | struct gk20a *g = gk20a_from_pmu(pmu); |
3830 | struct pmu_v *pv = &g->ops.pmu_ver; | 3830 | struct pmu_v *pv = &g->ops.pmu_ver; |
@@ -3924,7 +3924,7 @@ static int pmu_init_perfmon(struct pmu_gk20a *pmu) | |||
3924 | return 0; | 3924 | return 0; |
3925 | } | 3925 | } |
3926 | 3926 | ||
3927 | static int pmu_process_init_msg(struct pmu_gk20a *pmu, | 3927 | static int pmu_process_init_msg(struct nvgpu_pmu *pmu, |
3928 | struct pmu_msg *msg) | 3928 | struct pmu_msg *msg) |
3929 | { | 3929 | { |
3930 | struct gk20a *g = gk20a_from_pmu(pmu); | 3930 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -4002,7 +4002,7 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu, | |||
4002 | return 0; | 4002 | return 0; |
4003 | } | 4003 | } |
4004 | 4004 | ||
4005 | static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue, | 4005 | static bool pmu_read_message(struct nvgpu_pmu *pmu, struct pmu_queue *queue, |
4006 | struct pmu_msg *msg, int *status) | 4006 | struct pmu_msg *msg, int *status) |
4007 | { | 4007 | { |
4008 | struct gk20a *g = gk20a_from_pmu(pmu); | 4008 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -4077,7 +4077,7 @@ clean_up: | |||
4077 | return false; | 4077 | return false; |
4078 | } | 4078 | } |
4079 | 4079 | ||
4080 | static int pmu_response_handle(struct pmu_gk20a *pmu, | 4080 | static int pmu_response_handle(struct nvgpu_pmu *pmu, |
4081 | struct pmu_msg *msg) | 4081 | struct pmu_msg *msg) |
4082 | { | 4082 | { |
4083 | struct gk20a *g = gk20a_from_pmu(pmu); | 4083 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -4168,14 +4168,14 @@ static int pmu_response_handle(struct pmu_gk20a *pmu, | |||
4168 | static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg, | 4168 | static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg, |
4169 | void *param, u32 handle, u32 status) | 4169 | void *param, u32 handle, u32 status) |
4170 | { | 4170 | { |
4171 | struct pmu_gk20a *pmu = param; | 4171 | struct nvgpu_pmu *pmu = param; |
4172 | gk20a_dbg_pmu("reply ZBC_TABLE_UPDATE"); | 4172 | gk20a_dbg_pmu("reply ZBC_TABLE_UPDATE"); |
4173 | pmu->zbc_save_done = 1; | 4173 | pmu->zbc_save_done = 1; |
4174 | } | 4174 | } |
4175 | 4175 | ||
4176 | void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries) | 4176 | void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries) |
4177 | { | 4177 | { |
4178 | struct pmu_gk20a *pmu = &g->pmu; | 4178 | struct nvgpu_pmu *pmu = &g->pmu; |
4179 | struct pmu_cmd cmd; | 4179 | struct pmu_cmd cmd; |
4180 | u32 seq; | 4180 | u32 seq; |
4181 | 4181 | ||
@@ -4199,7 +4199,7 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries) | |||
4199 | nvgpu_err(g, "ZBC save timeout"); | 4199 | nvgpu_err(g, "ZBC save timeout"); |
4200 | } | 4200 | } |
4201 | 4201 | ||
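gk20a_pmu_save_zbc() above is one instance of the driver's post-and-wait pattern: build a pmu_cmd, post it with gk20a_pmu_cmd_post() together with a completion callback, then poll a flag with pmu_wait_message_cond(). The sketch below condenses that pattern and is hypothetical; real callers keep the completion flag inside struct nvgpu_pmu (as zbc_save_done is) rather than on the stack.

/* Hypothetical sketch of the post-and-wait pattern; simplified. */
static void example_done_cb(struct gk20a *g, struct pmu_msg *msg,
	void *param, u32 handle, u32 status)
{
	u32 *done = param;

	*done = 1;	/* reply arrived: release the waiter below */
}

static void example_post_and_wait(struct gk20a *g, struct pmu_cmd *cmd,
	u32 queue_id, u32 timeout_ms)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	u32 done = 0;	/* in real code this flag lives in struct nvgpu_pmu */
	u32 seq;

	/* no payload or response message needed here, hence the two NULLs */
	if (gk20a_pmu_cmd_post(g, cmd, NULL, NULL, queue_id,
			example_done_cb, &done, &seq, ~0UL))
		return;

	/* poll until the callback flips 'done' or timeout_ms expires */
	pmu_wait_message_cond(pmu, timeout_ms, &done, 1);
}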
4202 | int nvgpu_pmu_perfmon_start_sampling(struct pmu_gk20a *pmu) | 4202 | int nvgpu_pmu_perfmon_start_sampling(struct nvgpu_pmu *pmu) |
4203 | { | 4203 | { |
4204 | struct gk20a *g = gk20a_from_pmu(pmu); | 4204 | struct gk20a *g = gk20a_from_pmu(pmu); |
4205 | struct pmu_v *pv = &g->ops.pmu_ver; | 4205 | struct pmu_v *pv = &g->ops.pmu_ver; |
@@ -4243,7 +4243,7 @@ int nvgpu_pmu_perfmon_start_sampling(struct pmu_gk20a *pmu) | |||
4243 | return 0; | 4243 | return 0; |
4244 | } | 4244 | } |
4245 | 4245 | ||
4246 | int nvgpu_pmu_perfmon_stop_sampling(struct pmu_gk20a *pmu) | 4246 | int nvgpu_pmu_perfmon_stop_sampling(struct nvgpu_pmu *pmu) |
4247 | { | 4247 | { |
4248 | struct gk20a *g = gk20a_from_pmu(pmu); | 4248 | struct gk20a *g = gk20a_from_pmu(pmu); |
4249 | struct pmu_cmd cmd; | 4249 | struct pmu_cmd cmd; |
@@ -4261,7 +4261,7 @@ int nvgpu_pmu_perfmon_stop_sampling(struct pmu_gk20a *pmu) | |||
4261 | return 0; | 4261 | return 0; |
4262 | } | 4262 | } |
4263 | 4263 | ||
4264 | static int pmu_handle_perfmon_event(struct pmu_gk20a *pmu, | 4264 | static int pmu_handle_perfmon_event(struct nvgpu_pmu *pmu, |
4265 | struct pmu_perfmon_msg *msg) | 4265 | struct pmu_perfmon_msg *msg) |
4266 | { | 4266 | { |
4267 | gk20a_dbg_fn(""); | 4267 | gk20a_dbg_fn(""); |
@@ -4294,7 +4294,7 @@ static int pmu_handle_perfmon_event(struct pmu_gk20a *pmu, | |||
4294 | } | 4294 | } |
4295 | 4295 | ||
4296 | 4296 | ||
4297 | static int pmu_handle_therm_event(struct pmu_gk20a *pmu, | 4297 | static int pmu_handle_therm_event(struct nvgpu_pmu *pmu, |
4298 | struct nv_pmu_therm_msg *msg) | 4298 | struct nv_pmu_therm_msg *msg) |
4299 | { | 4299 | { |
4300 | gk20a_dbg_fn(""); | 4300 | gk20a_dbg_fn(""); |
@@ -4318,7 +4318,7 @@ static int pmu_handle_therm_event(struct pmu_gk20a *pmu, | |||
4318 | return 0; | 4318 | return 0; |
4319 | } | 4319 | } |
4320 | 4320 | ||
4321 | static int pmu_handle_event(struct pmu_gk20a *pmu, struct pmu_msg *msg) | 4321 | static int pmu_handle_event(struct nvgpu_pmu *pmu, struct pmu_msg *msg) |
4322 | { | 4322 | { |
4323 | int err = 0; | 4323 | int err = 0; |
4324 | struct gk20a *g = gk20a_from_pmu(pmu); | 4324 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -4347,7 +4347,7 @@ static int pmu_handle_event(struct pmu_gk20a *pmu, struct pmu_msg *msg) | |||
4347 | return err; | 4347 | return err; |
4348 | } | 4348 | } |
4349 | 4349 | ||
4350 | static int pmu_process_message(struct pmu_gk20a *pmu) | 4350 | static int pmu_process_message(struct nvgpu_pmu *pmu) |
4351 | { | 4351 | { |
4352 | struct pmu_msg msg; | 4352 | struct pmu_msg msg; |
4353 | int status; | 4353 | int status; |
@@ -4383,7 +4383,7 @@ static int pmu_process_message(struct pmu_gk20a *pmu) | |||
4383 | return 0; | 4383 | return 0; |
4384 | } | 4384 | } |
4385 | 4385 | ||
4386 | int pmu_wait_message_cond(struct pmu_gk20a *pmu, u32 timeout_ms, | 4386 | int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms, |
4387 | u32 *var, u32 val) | 4387 | u32 *var, u32 val) |
4388 | { | 4388 | { |
4389 | struct gk20a *g = gk20a_from_pmu(pmu); | 4389 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -4411,7 +4411,7 @@ int pmu_wait_message_cond(struct pmu_gk20a *pmu, u32 timeout_ms, | |||
4411 | return -ETIMEDOUT; | 4411 | return -ETIMEDOUT; |
4412 | } | 4412 | } |
4413 | 4413 | ||
4414 | static void pmu_dump_elpg_stats(struct pmu_gk20a *pmu) | 4414 | static void pmu_dump_elpg_stats(struct nvgpu_pmu *pmu) |
4415 | { | 4415 | { |
4416 | struct gk20a *g = gk20a_from_pmu(pmu); | 4416 | struct gk20a *g = gk20a_from_pmu(pmu); |
4417 | struct pmu_pg_stats stats; | 4417 | struct pmu_pg_stats stats; |
@@ -4484,7 +4484,7 @@ static void pmu_dump_elpg_stats(struct pmu_gk20a *pmu) | |||
4484 | */ | 4484 | */ |
4485 | } | 4485 | } |
4486 | 4486 | ||
4487 | void pmu_dump_falcon_stats(struct pmu_gk20a *pmu) | 4487 | void pmu_dump_falcon_stats(struct nvgpu_pmu *pmu) |
4488 | { | 4488 | { |
4489 | struct gk20a *g = gk20a_from_pmu(pmu); | 4489 | struct gk20a *g = gk20a_from_pmu(pmu); |
4490 | unsigned int i; | 4490 | unsigned int i; |
@@ -4610,7 +4610,7 @@ void pmu_dump_falcon_stats(struct pmu_gk20a *pmu) | |||
4610 | 4610 | ||
4611 | void gk20a_pmu_isr(struct gk20a *g) | 4611 | void gk20a_pmu_isr(struct gk20a *g) |
4612 | { | 4612 | { |
4613 | struct pmu_gk20a *pmu = &g->pmu; | 4613 | struct nvgpu_pmu *pmu = &g->pmu; |
4614 | struct pmu_queue *queue; | 4614 | struct pmu_queue *queue; |
4615 | u32 intr, mask; | 4615 | u32 intr, mask; |
4616 | bool recheck = false; | 4616 | bool recheck = false; |
@@ -4672,7 +4672,7 @@ void gk20a_pmu_isr(struct gk20a *g) | |||
4672 | nvgpu_mutex_release(&pmu->isr_mutex); | 4672 | nvgpu_mutex_release(&pmu->isr_mutex); |
4673 | } | 4673 | } |
4674 | 4674 | ||
4675 | static bool pmu_validate_cmd(struct pmu_gk20a *pmu, struct pmu_cmd *cmd, | 4675 | static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd, |
4676 | struct pmu_msg *msg, struct pmu_payload *payload, | 4676 | struct pmu_msg *msg, struct pmu_payload *payload, |
4677 | u32 queue_id) | 4677 | u32 queue_id) |
4678 | { | 4678 | { |
@@ -4742,7 +4742,7 @@ invalid_cmd: | |||
4742 | return false; | 4742 | return false; |
4743 | } | 4743 | } |
4744 | 4744 | ||
4745 | static int pmu_write_cmd(struct pmu_gk20a *pmu, struct pmu_cmd *cmd, | 4745 | static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd, |
4746 | u32 queue_id, unsigned long timeout_ms) | 4746 | u32 queue_id, unsigned long timeout_ms) |
4747 | { | 4747 | { |
4748 | struct gk20a *g = gk20a_from_pmu(pmu); | 4748 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -4832,7 +4832,7 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd, | |||
4832 | u32 queue_id, pmu_callback callback, void* cb_param, | 4832 | u32 queue_id, pmu_callback callback, void* cb_param, |
4833 | u32 *seq_desc, unsigned long timeout) | 4833 | u32 *seq_desc, unsigned long timeout) |
4834 | { | 4834 | { |
4835 | struct pmu_gk20a *pmu = &g->pmu; | 4835 | struct nvgpu_pmu *pmu = &g->pmu; |
4836 | struct pmu_v *pv = &g->ops.pmu_ver; | 4836 | struct pmu_v *pv = &g->ops.pmu_ver; |
4837 | struct pmu_sequence *seq; | 4837 | struct pmu_sequence *seq; |
4838 | void *in = NULL, *out = NULL; | 4838 | void *in = NULL, *out = NULL; |
@@ -5022,7 +5022,7 @@ int gk20a_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg) | |||
5022 | 5022 | ||
5023 | static int gk20a_pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id) | 5023 | static int gk20a_pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id) |
5024 | { | 5024 | { |
5025 | struct pmu_gk20a *pmu = &g->pmu; | 5025 | struct nvgpu_pmu *pmu = &g->pmu; |
5026 | struct pmu_cmd cmd; | 5026 | struct pmu_cmd cmd; |
5027 | u32 seq, status; | 5027 | u32 seq, status; |
5028 | 5028 | ||
@@ -5057,7 +5057,7 @@ static int gk20a_pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id) | |||
5057 | 5057 | ||
5058 | int gk20a_pmu_enable_elpg(struct gk20a *g) | 5058 | int gk20a_pmu_enable_elpg(struct gk20a *g) |
5059 | { | 5059 | { |
5060 | struct pmu_gk20a *pmu = &g->pmu; | 5060 | struct nvgpu_pmu *pmu = &g->pmu; |
5061 | struct gr_gk20a *gr = &g->gr; | 5061 | struct gr_gk20a *gr = &g->gr; |
5062 | u32 pg_engine_id; | 5062 | u32 pg_engine_id; |
5063 | u32 pg_engine_id_list = 0; | 5063 | u32 pg_engine_id_list = 0; |
@@ -5115,7 +5115,7 @@ exit_unlock: | |||
5115 | 5115 | ||
5116 | int gk20a_pmu_disable_elpg(struct gk20a *g) | 5116 | int gk20a_pmu_disable_elpg(struct gk20a *g) |
5117 | { | 5117 | { |
5118 | struct pmu_gk20a *pmu = &g->pmu; | 5118 | struct nvgpu_pmu *pmu = &g->pmu; |
5119 | struct pmu_cmd cmd; | 5119 | struct pmu_cmd cmd; |
5120 | u32 seq; | 5120 | u32 seq; |
5121 | int ret = 0; | 5121 | int ret = 0; |
@@ -5225,7 +5225,7 @@ exit_unlock: | |||
5225 | 5225 | ||
5226 | int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable) | 5226 | int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable) |
5227 | { | 5227 | { |
5228 | struct pmu_gk20a *pmu = &g->pmu; | 5228 | struct nvgpu_pmu *pmu = &g->pmu; |
5229 | int err; | 5229 | int err; |
5230 | 5230 | ||
5231 | gk20a_dbg_fn(""); | 5231 | gk20a_dbg_fn(""); |
@@ -5240,7 +5240,7 @@ int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable) | |||
5240 | 5240 | ||
5241 | int gk20a_pmu_destroy(struct gk20a *g) | 5241 | int gk20a_pmu_destroy(struct gk20a *g) |
5242 | { | 5242 | { |
5243 | struct pmu_gk20a *pmu = &g->pmu; | 5243 | struct nvgpu_pmu *pmu = &g->pmu; |
5244 | struct pmu_pg_stats_data pg_stat_data = { 0 }; | 5244 | struct pmu_pg_stats_data pg_stat_data = { 0 }; |
5245 | struct nvgpu_timeout timeout; | 5245 | struct nvgpu_timeout timeout; |
5246 | int i; | 5246 | int i; |
@@ -5306,7 +5306,7 @@ int gk20a_pmu_load_norm(struct gk20a *g, u32 *load) | |||
5306 | 5306 | ||
5307 | int gk20a_pmu_load_update(struct gk20a *g) | 5307 | int gk20a_pmu_load_update(struct gk20a *g) |
5308 | { | 5308 | { |
5309 | struct pmu_gk20a *pmu = &g->pmu; | 5309 | struct nvgpu_pmu *pmu = &g->pmu; |
5310 | u16 _load = 0; | 5310 | u16 _load = 0; |
5311 | 5311 | ||
5312 | if (!pmu->perfmon_ready) { | 5312 | if (!pmu->perfmon_ready) { |
@@ -5354,7 +5354,7 @@ void gk20a_pmu_reset_load_counters(struct gk20a *g) | |||
5354 | void gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, | 5354 | void gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, |
5355 | struct pmu_pg_stats_data *pg_stat_data) | 5355 | struct pmu_pg_stats_data *pg_stat_data) |
5356 | { | 5356 | { |
5357 | struct pmu_gk20a *pmu = &g->pmu; | 5357 | struct nvgpu_pmu *pmu = &g->pmu; |
5358 | struct pmu_pg_stats stats; | 5358 | struct pmu_pg_stats stats; |
5359 | 5359 | ||
5360 | pmu_copy_from_dmem(pmu, | 5360 | pmu_copy_from_dmem(pmu, |
@@ -5372,7 +5372,7 @@ int gk20a_pmu_get_pg_stats(struct gk20a *g, | |||
5372 | u32 pg_engine_id, | 5372 | u32 pg_engine_id, |
5373 | struct pmu_pg_stats_data *pg_stat_data) | 5373 | struct pmu_pg_stats_data *pg_stat_data) |
5374 | { | 5374 | { |
5375 | struct pmu_gk20a *pmu = &g->pmu; | 5375 | struct nvgpu_pmu *pmu = &g->pmu; |
5376 | u32 pg_engine_id_list = 0; | 5376 | u32 pg_engine_id_list = 0; |
5377 | 5377 | ||
5378 | if (!pmu->initialized) { | 5378 | if (!pmu->initialized) { |
@@ -5396,7 +5396,7 @@ int gk20a_pmu_get_pg_stats(struct gk20a *g, | |||
5396 | int gk20a_pmu_ap_send_command(struct gk20a *g, | 5396 | int gk20a_pmu_ap_send_command(struct gk20a *g, |
5397 | union pmu_ap_cmd *p_ap_cmd, bool b_block) | 5397 | union pmu_ap_cmd *p_ap_cmd, bool b_block) |
5398 | { | 5398 | { |
5399 | struct pmu_gk20a *pmu = &g->pmu; | 5399 | struct nvgpu_pmu *pmu = &g->pmu; |
5400 | /* FIXME: where is the PG structure defined?? */ | 5400 | /* FIXME: where is the PG structure defined?? */ |
5401 | u32 status = 0; | 5401 | u32 status = 0; |
5402 | struct pmu_cmd cmd; | 5402 | struct pmu_cmd cmd; |
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h index cfcf3947..3941d90f 100644 --- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h | |||
@@ -24,14 +24,10 @@ | |||
24 | #include <linux/version.h> | 24 | #include <linux/version.h> |
25 | #include <nvgpu/flcnif_cmn.h> | 25 | #include <nvgpu/flcnif_cmn.h> |
26 | #include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h> | 26 | #include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h> |
27 | #include <nvgpu/pmu.h> | ||
27 | 28 | ||
28 | struct nvgpu_firmware; | 29 | struct nvgpu_firmware; |
29 | 30 | ||
30 | /* defined by pmu hw spec */ | ||
31 | #define GK20A_PMU_VA_SIZE (512 * 1024 * 1024) | ||
32 | #define GK20A_PMU_UCODE_SIZE_MAX (256 * 1024) | ||
33 | #define GK20A_PMU_SEQ_BUF_SIZE 4096 | ||
34 | |||
35 | #define ZBC_MASK(i) (~(~(0) << ((i)+1)) & 0xfffe) | 31 | #define ZBC_MASK(i) (~(~(0) << ((i)+1)) & 0xfffe) |
36 | 32 | ||
37 | #define APP_VERSION_NC_3 21688026 | 33 | #define APP_VERSION_NC_3 21688026 |
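ZBC_MASK(), kept just above, evaluates to a mask with bits 1..i set and bit 0 always cleared. The standalone snippet below hand-checks a few values; it assumes 32-bit int arithmetic and is purely illustrative, not driver code.

/* Illustrative only: hand-checking ZBC_MASK(), assuming 32-bit ints. */
#include <assert.h>

#define ZBC_MASK(i) (~(~(0) << ((i)+1)) & 0xfffe)

int main(void)
{
	assert(ZBC_MASK(1) == 0x0002);	/* bit 1 only */
	assert(ZBC_MASK(3) == 0x000e);	/* bits 1..3 */
	assert(ZBC_MASK(7) == 0x00fe);	/* bits 1..7 */
	assert(ZBC_MASK(15) == 0xfffe);	/* bits 1..15, the widest mask */
	return 0;
}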
@@ -56,127 +52,11 @@ struct nvgpu_firmware; | |||
56 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) | 52 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) |
57 | #define FUSE_GCPLEX_CONFIG_FUSE_0 0x2C8 | 53 | #define FUSE_GCPLEX_CONFIG_FUSE_0 0x2C8 |
58 | #endif | 54 | #endif |
59 | #define PMU_MODE_MISMATCH_STATUS_MAILBOX_R 6 | ||
60 | #define PMU_MODE_MISMATCH_STATUS_VAL 0xDEADDEAD | ||
61 | |||
62 | enum { | ||
63 | GK20A_PMU_DMAIDX_UCODE = 0, | ||
64 | GK20A_PMU_DMAIDX_VIRT = 1, | ||
65 | GK20A_PMU_DMAIDX_PHYS_VID = 2, | ||
66 | GK20A_PMU_DMAIDX_PHYS_SYS_COH = 3, | ||
67 | GK20A_PMU_DMAIDX_PHYS_SYS_NCOH = 4, | ||
68 | GK20A_PMU_DMAIDX_RSVD = 5, | ||
69 | GK20A_PMU_DMAIDX_PELPG = 6, | ||
70 | GK20A_PMU_DMAIDX_END = 7 | ||
71 | }; | ||
72 | |||
73 | #define GK20A_PMU_TRACE_BUFSIZE 0x4000 /* 4K */ | ||
74 | #define GK20A_PMU_DMEM_BLKSIZE2 8 | ||
75 | |||
76 | #define GK20A_PMU_UCODE_NB_MAX_OVERLAY 32 | ||
77 | #define GK20A_PMU_UCODE_NB_MAX_DATE_LENGTH 64 | ||
78 | |||
79 | struct pmu_ucode_desc { | ||
80 | u32 descriptor_size; | ||
81 | u32 image_size; | ||
82 | u32 tools_version; | ||
83 | u32 app_version; | ||
84 | char date[GK20A_PMU_UCODE_NB_MAX_DATE_LENGTH]; | ||
85 | u32 bootloader_start_offset; | ||
86 | u32 bootloader_size; | ||
87 | u32 bootloader_imem_offset; | ||
88 | u32 bootloader_entry_point; | ||
89 | u32 app_start_offset; | ||
90 | u32 app_size; | ||
91 | u32 app_imem_offset; | ||
92 | u32 app_imem_entry; | ||
93 | u32 app_dmem_offset; | ||
94 | u32 app_resident_code_offset; /* Offset from appStartOffset */ | ||
95 | u32 app_resident_code_size; /* Exact size of the resident code ( potentially contains CRC inside at the end ) */ | ||
96 | u32 app_resident_data_offset; /* Offset from appStartOffset */ | ||
97 | u32 app_resident_data_size; /* Exact size of the resident data ( potentially contains CRC inside at the end ) */ | ||
98 | u32 nb_overlays; | ||
99 | struct {u32 start; u32 size;} load_ovl[GK20A_PMU_UCODE_NB_MAX_OVERLAY]; | ||
100 | u32 compressed; | ||
101 | }; | ||
102 | |||
103 | struct pmu_ucode_desc_v1 { | ||
104 | u32 descriptor_size; | ||
105 | u32 image_size; | ||
106 | u32 tools_version; | ||
107 | u32 app_version; | ||
108 | char date[GK20A_PMU_UCODE_NB_MAX_DATE_LENGTH]; | ||
109 | u32 bootloader_start_offset; | ||
110 | u32 bootloader_size; | ||
111 | u32 bootloader_imem_offset; | ||
112 | u32 bootloader_entry_point; | ||
113 | u32 app_start_offset; | ||
114 | u32 app_size; | ||
115 | u32 app_imem_offset; | ||
116 | u32 app_imem_entry; | ||
117 | u32 app_dmem_offset; | ||
118 | u32 app_resident_code_offset; | ||
119 | u32 app_resident_code_size; | ||
120 | u32 app_resident_data_offset; | ||
121 | u32 app_resident_data_size; | ||
122 | u32 nb_imem_overlays; | ||
123 | u32 nb_dmem_overlays; | ||
124 | struct {u32 start; u32 size; } load_ovl[64]; | ||
125 | u32 compressed; | ||
126 | }; | ||
127 | 55 | ||
128 | #define PMU_PGENG_GR_BUFFER_IDX_INIT (0) | 56 | #define PMU_PGENG_GR_BUFFER_IDX_INIT (0) |
129 | #define PMU_PGENG_GR_BUFFER_IDX_ZBC (1) | 57 | #define PMU_PGENG_GR_BUFFER_IDX_ZBC (1) |
130 | #define PMU_PGENG_GR_BUFFER_IDX_FECS (2) | 58 | #define PMU_PGENG_GR_BUFFER_IDX_FECS (2) |
131 | 59 | ||
132 | struct pmu_gk20a; | ||
133 | struct pmu_queue; | ||
134 | |||
135 | struct pmu_queue { | ||
136 | |||
137 | /* used by hw, for BIOS/SMI queue */ | ||
138 | u32 mutex_id; | ||
139 | u32 mutex_lock; | ||
140 | /* used by sw, for LPQ/HPQ queue */ | ||
141 | struct nvgpu_mutex mutex; | ||
142 | |||
143 | /* current write position */ | ||
144 | u32 position; | ||
145 | /* physical dmem offset where this queue begins */ | ||
146 | u32 offset; | ||
147 | /* logical queue identifier */ | ||
148 | u32 id; | ||
149 | /* physical queue index */ | ||
150 | u32 index; | ||
151 | /* in bytes */ | ||
152 | u32 size; | ||
153 | |||
154 | /* open-flag */ | ||
155 | u32 oflag; | ||
156 | bool opened; /* opened implies locked */ | ||
157 | }; | ||
158 | |||
159 | struct pmu_mutex { | ||
160 | u32 id; | ||
161 | u32 index; | ||
162 | u32 ref_cnt; | ||
163 | }; | ||
164 | |||
165 | #define PMU_MAX_NUM_SEQUENCES (256) | ||
166 | #define PMU_SEQ_BIT_SHIFT (5) | ||
167 | #define PMU_SEQ_TBL_SIZE \ | ||
168 | (PMU_MAX_NUM_SEQUENCES >> PMU_SEQ_BIT_SHIFT) | ||
169 | |||
170 | #define PMU_INVALID_SEQ_DESC (~0) | ||
171 | |||
172 | enum | ||
173 | { | ||
174 | PMU_SEQ_STATE_FREE = 0, | ||
175 | PMU_SEQ_STATE_PENDING, | ||
176 | PMU_SEQ_STATE_USED, | ||
177 | PMU_SEQ_STATE_CANCELLED | ||
178 | }; | ||
179 | |||
180 | struct pmu_payload { | 60 | struct pmu_payload { |
181 | struct { | 61 | struct { |
182 | void *buf; | 62 | void *buf; |
@@ -192,33 +72,6 @@ struct pmu_surface { | |||
192 | struct flcn_mem_desc_v0 params; | 72 | struct flcn_mem_desc_v0 params; |
193 | }; | 73 | }; |
194 | 74 | ||
195 | typedef void (*pmu_callback)(struct gk20a *, struct pmu_msg *, void *, u32, | ||
196 | u32); | ||
197 | |||
198 | struct pmu_sequence { | ||
199 | u8 id; | ||
200 | u32 state; | ||
201 | u32 desc; | ||
202 | struct pmu_msg *msg; | ||
203 | union { | ||
204 | struct pmu_allocation_v0 in_v0; | ||
205 | struct pmu_allocation_v1 in_v1; | ||
206 | struct pmu_allocation_v2 in_v2; | ||
207 | struct pmu_allocation_v3 in_v3; | ||
208 | }; | ||
209 | struct nvgpu_mem *in_mem; | ||
210 | union { | ||
211 | struct pmu_allocation_v0 out_v0; | ||
212 | struct pmu_allocation_v1 out_v1; | ||
213 | struct pmu_allocation_v2 out_v2; | ||
214 | struct pmu_allocation_v3 out_v3; | ||
215 | }; | ||
216 | struct nvgpu_mem *out_mem; | ||
217 | u8 *out_payload; | ||
218 | pmu_callback callback; | ||
219 | void* cb_params; | ||
220 | }; | ||
221 | |||
222 | /*PG defines used by nvgpu-pmu*/ | 75 | /*PG defines used by nvgpu-pmu*/ |
223 | struct pmu_pg_stats_data { | 76 | struct pmu_pg_stats_data { |
224 | u32 gating_cnt; | 77 | u32 gating_cnt; |
@@ -263,147 +116,6 @@ struct pmu_pg_stats_data { | |||
263 | #define APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT (200) | 116 | #define APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT (200) |
264 | /*PG defines used by nvgpu-pmu*/ | 117 | /*PG defines used by nvgpu-pmu*/ |
265 | 118 | ||
266 | /* Falcon Register index */ | ||
267 | #define PMU_FALCON_REG_R0 (0) | ||
268 | #define PMU_FALCON_REG_R1 (1) | ||
269 | #define PMU_FALCON_REG_R2 (2) | ||
270 | #define PMU_FALCON_REG_R3 (3) | ||
271 | #define PMU_FALCON_REG_R4 (4) | ||
272 | #define PMU_FALCON_REG_R5 (5) | ||
273 | #define PMU_FALCON_REG_R6 (6) | ||
274 | #define PMU_FALCON_REG_R7 (7) | ||
275 | #define PMU_FALCON_REG_R8 (8) | ||
276 | #define PMU_FALCON_REG_R9 (9) | ||
277 | #define PMU_FALCON_REG_R10 (10) | ||
278 | #define PMU_FALCON_REG_R11 (11) | ||
279 | #define PMU_FALCON_REG_R12 (12) | ||
280 | #define PMU_FALCON_REG_R13 (13) | ||
281 | #define PMU_FALCON_REG_R14 (14) | ||
282 | #define PMU_FALCON_REG_R15 (15) | ||
283 | #define PMU_FALCON_REG_IV0 (16) | ||
284 | #define PMU_FALCON_REG_IV1 (17) | ||
285 | #define PMU_FALCON_REG_UNDEFINED (18) | ||
286 | #define PMU_FALCON_REG_EV (19) | ||
287 | #define PMU_FALCON_REG_SP (20) | ||
288 | #define PMU_FALCON_REG_PC (21) | ||
289 | #define PMU_FALCON_REG_IMB (22) | ||
290 | #define PMU_FALCON_REG_DMB (23) | ||
291 | #define PMU_FALCON_REG_CSW (24) | ||
292 | #define PMU_FALCON_REG_CCR (25) | ||
293 | #define PMU_FALCON_REG_SEC (26) | ||
294 | #define PMU_FALCON_REG_CTX (27) | ||
295 | #define PMU_FALCON_REG_EXCI (28) | ||
296 | #define PMU_FALCON_REG_RSVD0 (29) | ||
297 | #define PMU_FALCON_REG_RSVD1 (30) | ||
298 | #define PMU_FALCON_REG_RSVD2 (31) | ||
299 | #define PMU_FALCON_REG_SIZE (32) | ||
300 | |||
301 | /* Choices for pmu_state */ | ||
302 | #define PMU_STATE_OFF 0 /* PMU is off */ | ||
303 | #define PMU_STATE_STARTING 1 /* PMU is on, but not booted */ | ||
304 | #define PMU_STATE_INIT_RECEIVED 2 /* PMU init message received */ | ||
305 | #define PMU_STATE_ELPG_BOOTING 3 /* PMU is booting */ | ||
306 | #define PMU_STATE_ELPG_BOOTED 4 /* ELPG is initialized */ | ||
307 | #define PMU_STATE_LOADING_PG_BUF 5 /* Loading PG buf */ | ||
308 | #define PMU_STATE_LOADING_ZBC 6 /* Loading ZBC buf */ | ||
309 | #define PMU_STATE_STARTED 7 /* Fully initialized */ | ||
310 | #define PMU_STATE_EXIT 8 /* Exit PMU state machine */ | ||
311 | |||
312 | struct nvgpu_pg_init { | ||
313 | bool state_change; | ||
314 | struct nvgpu_cond wq; | ||
315 | struct nvgpu_thread state_task; | ||
316 | }; | ||
317 | |||
318 | struct pmu_gk20a { | ||
319 | |||
320 | union { | ||
321 | struct pmu_ucode_desc *desc; | ||
322 | struct pmu_ucode_desc_v1 *desc_v1; | ||
323 | }; | ||
324 | struct nvgpu_mem ucode; | ||
325 | |||
326 | struct nvgpu_mem pg_buf; | ||
327 | /* TBD: remove this if ZBC seq is fixed */ | ||
328 | struct nvgpu_mem seq_buf; | ||
329 | struct nvgpu_mem trace_buf; | ||
330 | struct nvgpu_mem wpr_buf; | ||
331 | bool buf_loaded; | ||
332 | |||
333 | struct pmu_sha1_gid gid_info; | ||
334 | |||
335 | struct pmu_queue queue[PMU_QUEUE_COUNT]; | ||
336 | |||
337 | struct pmu_sequence *seq; | ||
338 | unsigned long pmu_seq_tbl[PMU_SEQ_TBL_SIZE]; | ||
339 | u32 next_seq_desc; | ||
340 | |||
341 | struct pmu_mutex *mutex; | ||
342 | u32 mutex_cnt; | ||
343 | |||
344 | struct nvgpu_mutex pmu_copy_lock; | ||
345 | struct nvgpu_mutex pmu_seq_lock; | ||
346 | |||
347 | struct nvgpu_allocator dmem; | ||
348 | |||
349 | u32 *ucode_image; | ||
350 | bool pmu_ready; | ||
351 | |||
352 | u32 zbc_save_done; | ||
353 | |||
354 | u32 stat_dmem_offset[PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE]; | ||
355 | |||
356 | u32 elpg_stat; | ||
357 | |||
358 | u32 mscg_stat; | ||
359 | u32 mscg_transition_state; | ||
360 | |||
361 | int pmu_state; | ||
362 | |||
363 | #define PMU_ELPG_ENABLE_ALLOW_DELAY_MSEC 1 /* msec */ | ||
364 | struct nvgpu_pg_init pg_init; | ||
365 | struct nvgpu_mutex pg_mutex; /* protect pg-RPPG/MSCG enable/disable */ | ||
366 | struct nvgpu_mutex elpg_mutex; /* protect elpg enable/disable */ | ||
367 | int elpg_refcnt; /* disable -1, enable +1, <=0 elpg disabled, > 0 elpg enabled */ | ||
368 | |||
369 | union { | ||
370 | struct pmu_perfmon_counter_v2 perfmon_counter_v2; | ||
371 | struct pmu_perfmon_counter_v0 perfmon_counter_v0; | ||
372 | }; | ||
373 | u32 perfmon_state_id[PMU_DOMAIN_GROUP_NUM]; | ||
374 | |||
375 | bool initialized; | ||
376 | |||
377 | void (*remove_support)(struct pmu_gk20a *pmu); | ||
378 | bool sw_ready; | ||
379 | bool perfmon_ready; | ||
380 | |||
381 | u32 sample_buffer; | ||
382 | u32 load_shadow; | ||
383 | u32 load_avg; | ||
384 | |||
385 | struct nvgpu_mutex isr_mutex; | ||
386 | bool isr_enabled; | ||
387 | |||
388 | bool zbc_ready; | ||
389 | union { | ||
390 | struct pmu_cmdline_args_v0 args_v0; | ||
391 | struct pmu_cmdline_args_v1 args_v1; | ||
392 | struct pmu_cmdline_args_v2 args_v2; | ||
393 | struct pmu_cmdline_args_v3 args_v3; | ||
394 | struct pmu_cmdline_args_v4 args_v4; | ||
395 | struct pmu_cmdline_args_v5 args_v5; | ||
396 | }; | ||
397 | unsigned long perfmon_events_cnt; | ||
398 | bool perfmon_sampling_enabled; | ||
399 | u8 pmu_mode; /*Added for GM20b, and ACR*/ | ||
400 | u32 falcon_id; | ||
401 | u32 aelpg_param[5]; | ||
402 | u32 override_done; | ||
403 | |||
404 | struct nvgpu_firmware *fw; | ||
405 | }; | ||
406 | |||
407 | int gk20a_init_pmu_support(struct gk20a *g); | 119 | int gk20a_init_pmu_support(struct gk20a *g); |
408 | int gk20a_init_pmu_bind_fecs(struct gk20a *g); | 120 | int gk20a_init_pmu_bind_fecs(struct gk20a *g); |
409 | 121 | ||
@@ -426,8 +138,8 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries); | |||
426 | 138 | ||
427 | int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable); | 139 | int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable); |
428 | 140 | ||
429 | int pmu_mutex_acquire(struct pmu_gk20a *pmu, u32 id, u32 *token); | 141 | int pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token); |
430 | int pmu_mutex_release(struct pmu_gk20a *pmu, u32 id, u32 *token); | 142 | int pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token); |
431 | int gk20a_pmu_destroy(struct gk20a *g); | 143 | int gk20a_pmu_destroy(struct gk20a *g); |
432 | int gk20a_pmu_load_norm(struct gk20a *g, u32 *load); | 144 | int gk20a_pmu_load_norm(struct gk20a *g, u32 *load); |
433 | int gk20a_pmu_load_update(struct gk20a *g); | 145 | int gk20a_pmu_load_update(struct gk20a *g); |
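pmu_mutex_acquire()/pmu_mutex_release(), updated above, guard state shared with the PMU falcon via a hardware mutex; the token filled in by acquire is handed back to release. The function below is a hypothetical usage sketch (example_with_pmu_mutex is not a real driver function) and does nothing useful inside the critical section.

/* Hypothetical usage sketch; not part of the driver. */
static int example_with_pmu_mutex(struct nvgpu_pmu *pmu, u32 mutex_id)
{
	u32 token = 0;
	int err;

	err = pmu_mutex_acquire(pmu, mutex_id, &token);
	if (err)
		return err;

	/* ... access state shared with the PMU falcon here ... */

	pmu_mutex_release(pmu, mutex_id, &token);
	return 0;
}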
@@ -436,33 +148,33 @@ void gk20a_pmu_get_load_counters(struct gk20a *g, u32 *busy_cycles, | |||
436 | u32 *total_cycles); | 148 | u32 *total_cycles); |
437 | void gk20a_init_pmu_ops(struct gpu_ops *gops); | 149 | void gk20a_init_pmu_ops(struct gpu_ops *gops); |
438 | 150 | ||
439 | void pmu_copy_to_dmem(struct pmu_gk20a *pmu, | 151 | void pmu_copy_to_dmem(struct nvgpu_pmu *pmu, |
440 | u32 dst, u8 *src, u32 size, u8 port); | 152 | u32 dst, u8 *src, u32 size, u8 port); |
441 | void pmu_copy_from_dmem(struct pmu_gk20a *pmu, | 153 | void pmu_copy_from_dmem(struct nvgpu_pmu *pmu, |
442 | u32 src, u8 *dst, u32 size, u8 port); | 154 | u32 src, u8 *dst, u32 size, u8 port); |
443 | int pmu_reset(struct pmu_gk20a *pmu); | 155 | int pmu_reset(struct nvgpu_pmu *pmu); |
444 | int pmu_bootstrap(struct pmu_gk20a *pmu); | 156 | int pmu_bootstrap(struct nvgpu_pmu *pmu); |
445 | int gk20a_init_pmu(struct pmu_gk20a *pmu); | 157 | int gk20a_init_pmu(struct nvgpu_pmu *pmu); |
446 | void pmu_dump_falcon_stats(struct pmu_gk20a *pmu); | 158 | void pmu_dump_falcon_stats(struct nvgpu_pmu *pmu); |
447 | void gk20a_remove_pmu_support(struct pmu_gk20a *pmu); | 159 | void gk20a_remove_pmu_support(struct nvgpu_pmu *pmu); |
448 | void pmu_seq_init(struct pmu_gk20a *pmu); | 160 | void pmu_seq_init(struct nvgpu_pmu *pmu); |
449 | 161 | ||
450 | int gk20a_init_pmu(struct pmu_gk20a *pmu); | 162 | int gk20a_init_pmu(struct nvgpu_pmu *pmu); |
451 | 163 | ||
452 | int gk20a_pmu_ap_send_command(struct gk20a *g, | 164 | int gk20a_pmu_ap_send_command(struct gk20a *g, |
453 | union pmu_ap_cmd *p_ap_cmd, bool b_block); | 165 | union pmu_ap_cmd *p_ap_cmd, bool b_block); |
454 | int gk20a_aelpg_init(struct gk20a *g); | 166 | int gk20a_aelpg_init(struct gk20a *g); |
455 | int gk20a_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id); | 167 | int gk20a_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id); |
456 | void pmu_enable_irq(struct pmu_gk20a *pmu, bool enable); | 168 | void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable); |
457 | int pmu_wait_message_cond(struct pmu_gk20a *pmu, u32 timeout_ms, | 169 | int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms, |
458 | u32 *var, u32 val); | 170 | u32 *var, u32 val); |
459 | void pmu_handle_fecs_boot_acr_msg(struct gk20a *g, struct pmu_msg *msg, | 171 | void pmu_handle_fecs_boot_acr_msg(struct gk20a *g, struct pmu_msg *msg, |
460 | void *param, u32 handle, u32 status); | 172 | void *param, u32 handle, u32 status); |
461 | void gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, | 173 | void gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, |
462 | struct pmu_pg_stats_data *pg_stat_data); | 174 | struct pmu_pg_stats_data *pg_stat_data); |
463 | int gk20a_pmu_reset(struct gk20a *g); | 175 | int gk20a_pmu_reset(struct gk20a *g); |
464 | int pmu_idle(struct pmu_gk20a *pmu); | 176 | int pmu_idle(struct nvgpu_pmu *pmu); |
465 | int pmu_enable_hw(struct pmu_gk20a *pmu, bool enable); | 177 | int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable); |
466 | 178 | ||
467 | void gk20a_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem); | 179 | void gk20a_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem); |
468 | void gk20a_pmu_surface_describe(struct gk20a *g, struct nvgpu_mem *mem, | 180 | void gk20a_pmu_surface_describe(struct gk20a *g, struct nvgpu_mem *mem, |
@@ -475,7 +187,7 @@ int gk20a_pmu_get_pg_stats(struct gk20a *g, | |||
475 | u32 pg_engine_id, struct pmu_pg_stats_data *pg_stat_data); | 187 | u32 pg_engine_id, struct pmu_pg_stats_data *pg_stat_data); |
476 | bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos); | 188 | bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos); |
477 | 189 | ||
478 | int nvgpu_pmu_perfmon_start_sampling(struct pmu_gk20a *pmu); | 190 | int nvgpu_pmu_perfmon_start_sampling(struct nvgpu_pmu *pmu); |
479 | int nvgpu_pmu_perfmon_stop_sampling(struct pmu_gk20a *pmu); | 191 | int nvgpu_pmu_perfmon_stop_sampling(struct nvgpu_pmu *pmu); |
480 | 192 | ||
481 | #endif /*__PMU_GK20A_H__*/ | 193 | #endif /*__PMU_GK20A_H__*/ |