diff options
author | Mahantesh Kumbar <mkumbar@nvidia.com> | 2017-05-10 11:05:24 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-06-05 02:05:18 -0400 |
commit | 673dd971600b26131c0afdb221e13c080da022fd (patch) | |
tree | 7c8416ac2ef61891812773d55c8c8dc61da824aa /drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | |
parent | 7668ccb2a2e4a8c13d82b427c65be79c725afe08 (diff) |
gpu: nvgpu: moved & renamed "struct pmu_gk20a"
- Renamed "struct pmu_gk20a" to "struct nvgpu_pmu" and moved it
to the file "pmu.h" under "drivers/gpu/nvgpu/include/nvgpu/"
- Included the header file "pmu.h" in dependent files &
removed the "pmu_gk20a.h" include where it is no longer used.
- Replaced "struct pmu_gk20a" with "struct nvgpu_pmu" in dependent
source & header files.
JIRA NVGPU-56
Change-Id: Ia3c606616831027093d5c216959c6a40d7c2632e
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: http://git-master/r/1479209
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/pmu_gk20a.c')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | 318 |
1 file changed, 159 insertions, 159 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c index fc46db91..e74a5264 100644 --- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | |||
@@ -56,171 +56,171 @@ static void nvgpu_pmu_state_change(struct gk20a *g, u32 pmu_state, | |||
56 | 56 | ||
57 | static int pmu_init_powergating(struct gk20a *g); | 57 | static int pmu_init_powergating(struct gk20a *g); |
58 | 58 | ||
59 | static u32 pmu_perfmon_cntr_sz_v0(struct pmu_gk20a *pmu) | 59 | static u32 pmu_perfmon_cntr_sz_v0(struct nvgpu_pmu *pmu) |
60 | { | 60 | { |
61 | return sizeof(struct pmu_perfmon_counter_v0); | 61 | return sizeof(struct pmu_perfmon_counter_v0); |
62 | } | 62 | } |
63 | 63 | ||
64 | static u32 pmu_perfmon_cntr_sz_v2(struct pmu_gk20a *pmu) | 64 | static u32 pmu_perfmon_cntr_sz_v2(struct nvgpu_pmu *pmu) |
65 | { | 65 | { |
66 | return sizeof(struct pmu_perfmon_counter_v2); | 66 | return sizeof(struct pmu_perfmon_counter_v2); |
67 | } | 67 | } |
68 | 68 | ||
69 | static void *get_perfmon_cntr_ptr_v2(struct pmu_gk20a *pmu) | 69 | static void *get_perfmon_cntr_ptr_v2(struct nvgpu_pmu *pmu) |
70 | { | 70 | { |
71 | return (void *)(&pmu->perfmon_counter_v2); | 71 | return (void *)(&pmu->perfmon_counter_v2); |
72 | } | 72 | } |
73 | 73 | ||
74 | static void *get_perfmon_cntr_ptr_v0(struct pmu_gk20a *pmu) | 74 | static void *get_perfmon_cntr_ptr_v0(struct nvgpu_pmu *pmu) |
75 | { | 75 | { |
76 | return (void *)(&pmu->perfmon_counter_v0); | 76 | return (void *)(&pmu->perfmon_counter_v0); |
77 | } | 77 | } |
78 | 78 | ||
79 | static void set_perfmon_cntr_ut_v2(struct pmu_gk20a *pmu, u16 ut) | 79 | static void set_perfmon_cntr_ut_v2(struct nvgpu_pmu *pmu, u16 ut) |
80 | { | 80 | { |
81 | pmu->perfmon_counter_v2.upper_threshold = ut; | 81 | pmu->perfmon_counter_v2.upper_threshold = ut; |
82 | } | 82 | } |
83 | 83 | ||
84 | static void set_perfmon_cntr_ut_v0(struct pmu_gk20a *pmu, u16 ut) | 84 | static void set_perfmon_cntr_ut_v0(struct nvgpu_pmu *pmu, u16 ut) |
85 | { | 85 | { |
86 | pmu->perfmon_counter_v0.upper_threshold = ut; | 86 | pmu->perfmon_counter_v0.upper_threshold = ut; |
87 | } | 87 | } |
88 | 88 | ||
89 | static void set_perfmon_cntr_lt_v2(struct pmu_gk20a *pmu, u16 lt) | 89 | static void set_perfmon_cntr_lt_v2(struct nvgpu_pmu *pmu, u16 lt) |
90 | { | 90 | { |
91 | pmu->perfmon_counter_v2.lower_threshold = lt; | 91 | pmu->perfmon_counter_v2.lower_threshold = lt; |
92 | } | 92 | } |
93 | 93 | ||
94 | static void set_perfmon_cntr_lt_v0(struct pmu_gk20a *pmu, u16 lt) | 94 | static void set_perfmon_cntr_lt_v0(struct nvgpu_pmu *pmu, u16 lt) |
95 | { | 95 | { |
96 | pmu->perfmon_counter_v0.lower_threshold = lt; | 96 | pmu->perfmon_counter_v0.lower_threshold = lt; |
97 | } | 97 | } |
98 | 98 | ||
99 | static void set_perfmon_cntr_valid_v2(struct pmu_gk20a *pmu, u8 valid) | 99 | static void set_perfmon_cntr_valid_v2(struct nvgpu_pmu *pmu, u8 valid) |
100 | { | 100 | { |
101 | pmu->perfmon_counter_v2.valid = valid; | 101 | pmu->perfmon_counter_v2.valid = valid; |
102 | } | 102 | } |
103 | 103 | ||
104 | static void set_perfmon_cntr_valid_v0(struct pmu_gk20a *pmu, u8 valid) | 104 | static void set_perfmon_cntr_valid_v0(struct nvgpu_pmu *pmu, u8 valid) |
105 | { | 105 | { |
106 | pmu->perfmon_counter_v0.valid = valid; | 106 | pmu->perfmon_counter_v0.valid = valid; |
107 | } | 107 | } |
108 | 108 | ||
109 | static void set_perfmon_cntr_index_v2(struct pmu_gk20a *pmu, u8 index) | 109 | static void set_perfmon_cntr_index_v2(struct nvgpu_pmu *pmu, u8 index) |
110 | { | 110 | { |
111 | pmu->perfmon_counter_v2.index = index; | 111 | pmu->perfmon_counter_v2.index = index; |
112 | } | 112 | } |
113 | 113 | ||
114 | static void set_perfmon_cntr_index_v0(struct pmu_gk20a *pmu, u8 index) | 114 | static void set_perfmon_cntr_index_v0(struct nvgpu_pmu *pmu, u8 index) |
115 | { | 115 | { |
116 | pmu->perfmon_counter_v0.index = index; | 116 | pmu->perfmon_counter_v0.index = index; |
117 | } | 117 | } |
118 | 118 | ||
119 | static void set_perfmon_cntr_group_id_v2(struct pmu_gk20a *pmu, u8 gid) | 119 | static void set_perfmon_cntr_group_id_v2(struct nvgpu_pmu *pmu, u8 gid) |
120 | { | 120 | { |
121 | pmu->perfmon_counter_v2.group_id = gid; | 121 | pmu->perfmon_counter_v2.group_id = gid; |
122 | } | 122 | } |
123 | 123 | ||
124 | static void set_perfmon_cntr_group_id_v0(struct pmu_gk20a *pmu, u8 gid) | 124 | static void set_perfmon_cntr_group_id_v0(struct nvgpu_pmu *pmu, u8 gid) |
125 | { | 125 | { |
126 | pmu->perfmon_counter_v0.group_id = gid; | 126 | pmu->perfmon_counter_v0.group_id = gid; |
127 | } | 127 | } |
128 | 128 | ||
129 | static u32 pmu_cmdline_size_v0(struct pmu_gk20a *pmu) | 129 | static u32 pmu_cmdline_size_v0(struct nvgpu_pmu *pmu) |
130 | { | 130 | { |
131 | return sizeof(struct pmu_cmdline_args_v0); | 131 | return sizeof(struct pmu_cmdline_args_v0); |
132 | } | 132 | } |
133 | 133 | ||
134 | static u32 pmu_cmdline_size_v1(struct pmu_gk20a *pmu) | 134 | static u32 pmu_cmdline_size_v1(struct nvgpu_pmu *pmu) |
135 | { | 135 | { |
136 | return sizeof(struct pmu_cmdline_args_v1); | 136 | return sizeof(struct pmu_cmdline_args_v1); |
137 | } | 137 | } |
138 | 138 | ||
139 | static u32 pmu_cmdline_size_v2(struct pmu_gk20a *pmu) | 139 | static u32 pmu_cmdline_size_v2(struct nvgpu_pmu *pmu) |
140 | { | 140 | { |
141 | return sizeof(struct pmu_cmdline_args_v2); | 141 | return sizeof(struct pmu_cmdline_args_v2); |
142 | } | 142 | } |
143 | 143 | ||
144 | static void set_pmu_cmdline_args_cpufreq_v2(struct pmu_gk20a *pmu, u32 freq) | 144 | static void set_pmu_cmdline_args_cpufreq_v2(struct nvgpu_pmu *pmu, u32 freq) |
145 | { | 145 | { |
146 | pmu->args_v2.cpu_freq_hz = freq; | 146 | pmu->args_v2.cpu_freq_hz = freq; |
147 | } | 147 | } |
148 | static void set_pmu_cmdline_args_secure_mode_v2(struct pmu_gk20a *pmu, u32 val) | 148 | static void set_pmu_cmdline_args_secure_mode_v2(struct nvgpu_pmu *pmu, u32 val) |
149 | { | 149 | { |
150 | pmu->args_v2.secure_mode = val; | 150 | pmu->args_v2.secure_mode = val; |
151 | } | 151 | } |
152 | 152 | ||
153 | static void set_pmu_cmdline_args_falctracesize_v2( | 153 | static void set_pmu_cmdline_args_falctracesize_v2( |
154 | struct pmu_gk20a *pmu, u32 size) | 154 | struct nvgpu_pmu *pmu, u32 size) |
155 | { | 155 | { |
156 | pmu->args_v2.falc_trace_size = size; | 156 | pmu->args_v2.falc_trace_size = size; |
157 | } | 157 | } |
158 | 158 | ||
159 | static void set_pmu_cmdline_args_falctracedmabase_v2(struct pmu_gk20a *pmu) | 159 | static void set_pmu_cmdline_args_falctracedmabase_v2(struct nvgpu_pmu *pmu) |
160 | { | 160 | { |
161 | pmu->args_v2.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; | 161 | pmu->args_v2.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; |
162 | } | 162 | } |
163 | 163 | ||
164 | static void set_pmu_cmdline_args_falctracedmaidx_v2( | 164 | static void set_pmu_cmdline_args_falctracedmaidx_v2( |
165 | struct pmu_gk20a *pmu, u32 idx) | 165 | struct nvgpu_pmu *pmu, u32 idx) |
166 | { | 166 | { |
167 | pmu->args_v2.falc_trace_dma_idx = idx; | 167 | pmu->args_v2.falc_trace_dma_idx = idx; |
168 | } | 168 | } |
169 | 169 | ||
170 | 170 | ||
171 | static void set_pmu_cmdline_args_falctracedmabase_v4(struct pmu_gk20a *pmu) | 171 | static void set_pmu_cmdline_args_falctracedmabase_v4(struct nvgpu_pmu *pmu) |
172 | { | 172 | { |
173 | pmu->args_v4.dma_addr.dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; | 173 | pmu->args_v4.dma_addr.dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; |
174 | pmu->args_v4.dma_addr.dma_base1 = 0; | 174 | pmu->args_v4.dma_addr.dma_base1 = 0; |
175 | pmu->args_v4.dma_addr.dma_offset = 0; | 175 | pmu->args_v4.dma_addr.dma_offset = 0; |
176 | } | 176 | } |
177 | 177 | ||
178 | static u32 pmu_cmdline_size_v4(struct pmu_gk20a *pmu) | 178 | static u32 pmu_cmdline_size_v4(struct nvgpu_pmu *pmu) |
179 | { | 179 | { |
180 | return sizeof(struct pmu_cmdline_args_v4); | 180 | return sizeof(struct pmu_cmdline_args_v4); |
181 | } | 181 | } |
182 | 182 | ||
183 | static void set_pmu_cmdline_args_cpufreq_v4(struct pmu_gk20a *pmu, u32 freq) | 183 | static void set_pmu_cmdline_args_cpufreq_v4(struct nvgpu_pmu *pmu, u32 freq) |
184 | { | 184 | { |
185 | pmu->args_v4.cpu_freq_hz = freq; | 185 | pmu->args_v4.cpu_freq_hz = freq; |
186 | } | 186 | } |
187 | static void set_pmu_cmdline_args_secure_mode_v4(struct pmu_gk20a *pmu, u32 val) | 187 | static void set_pmu_cmdline_args_secure_mode_v4(struct nvgpu_pmu *pmu, u32 val) |
188 | { | 188 | { |
189 | pmu->args_v4.secure_mode = val; | 189 | pmu->args_v4.secure_mode = val; |
190 | } | 190 | } |
191 | 191 | ||
192 | static void set_pmu_cmdline_args_falctracesize_v4( | 192 | static void set_pmu_cmdline_args_falctracesize_v4( |
193 | struct pmu_gk20a *pmu, u32 size) | 193 | struct nvgpu_pmu *pmu, u32 size) |
194 | { | 194 | { |
195 | pmu->args_v4.falc_trace_size = size; | 195 | pmu->args_v4.falc_trace_size = size; |
196 | } | 196 | } |
197 | static void set_pmu_cmdline_args_falctracedmaidx_v4( | 197 | static void set_pmu_cmdline_args_falctracedmaidx_v4( |
198 | struct pmu_gk20a *pmu, u32 idx) | 198 | struct nvgpu_pmu *pmu, u32 idx) |
199 | { | 199 | { |
200 | pmu->args_v4.falc_trace_dma_idx = idx; | 200 | pmu->args_v4.falc_trace_dma_idx = idx; |
201 | } | 201 | } |
202 | 202 | ||
203 | static u32 pmu_cmdline_size_v5(struct pmu_gk20a *pmu) | 203 | static u32 pmu_cmdline_size_v5(struct nvgpu_pmu *pmu) |
204 | { | 204 | { |
205 | return sizeof(struct pmu_cmdline_args_v5); | 205 | return sizeof(struct pmu_cmdline_args_v5); |
206 | } | 206 | } |
207 | 207 | ||
208 | static void set_pmu_cmdline_args_cpufreq_v5(struct pmu_gk20a *pmu, u32 freq) | 208 | static void set_pmu_cmdline_args_cpufreq_v5(struct nvgpu_pmu *pmu, u32 freq) |
209 | { | 209 | { |
210 | pmu->args_v5.cpu_freq_hz = 204000000; | 210 | pmu->args_v5.cpu_freq_hz = 204000000; |
211 | } | 211 | } |
212 | static void set_pmu_cmdline_args_secure_mode_v5(struct pmu_gk20a *pmu, u32 val) | 212 | static void set_pmu_cmdline_args_secure_mode_v5(struct nvgpu_pmu *pmu, u32 val) |
213 | { | 213 | { |
214 | pmu->args_v5.secure_mode = val; | 214 | pmu->args_v5.secure_mode = val; |
215 | } | 215 | } |
216 | 216 | ||
217 | static void set_pmu_cmdline_args_falctracesize_v5( | 217 | static void set_pmu_cmdline_args_falctracesize_v5( |
218 | struct pmu_gk20a *pmu, u32 size) | 218 | struct nvgpu_pmu *pmu, u32 size) |
219 | { | 219 | { |
220 | /* set by surface describe */ | 220 | /* set by surface describe */ |
221 | } | 221 | } |
222 | 222 | ||
223 | static void set_pmu_cmdline_args_falctracedmabase_v5(struct pmu_gk20a *pmu) | 223 | static void set_pmu_cmdline_args_falctracedmabase_v5(struct nvgpu_pmu *pmu) |
224 | { | 224 | { |
225 | struct gk20a *g = gk20a_from_pmu(pmu); | 225 | struct gk20a *g = gk20a_from_pmu(pmu); |
226 | 226 | ||
@@ -228,53 +228,53 @@ static void set_pmu_cmdline_args_falctracedmabase_v5(struct pmu_gk20a *pmu) | |||
228 | } | 228 | } |
229 | 229 | ||
230 | static void set_pmu_cmdline_args_falctracedmaidx_v5( | 230 | static void set_pmu_cmdline_args_falctracedmaidx_v5( |
231 | struct pmu_gk20a *pmu, u32 idx) | 231 | struct nvgpu_pmu *pmu, u32 idx) |
232 | { | 232 | { |
233 | /* set by surface describe */ | 233 | /* set by surface describe */ |
234 | } | 234 | } |
235 | 235 | ||
236 | static u32 pmu_cmdline_size_v3(struct pmu_gk20a *pmu) | 236 | static u32 pmu_cmdline_size_v3(struct nvgpu_pmu *pmu) |
237 | { | 237 | { |
238 | return sizeof(struct pmu_cmdline_args_v3); | 238 | return sizeof(struct pmu_cmdline_args_v3); |
239 | } | 239 | } |
240 | 240 | ||
241 | static void set_pmu_cmdline_args_cpufreq_v3(struct pmu_gk20a *pmu, u32 freq) | 241 | static void set_pmu_cmdline_args_cpufreq_v3(struct nvgpu_pmu *pmu, u32 freq) |
242 | { | 242 | { |
243 | pmu->args_v3.cpu_freq_hz = freq; | 243 | pmu->args_v3.cpu_freq_hz = freq; |
244 | } | 244 | } |
245 | static void set_pmu_cmdline_args_secure_mode_v3(struct pmu_gk20a *pmu, u32 val) | 245 | static void set_pmu_cmdline_args_secure_mode_v3(struct nvgpu_pmu *pmu, u32 val) |
246 | { | 246 | { |
247 | pmu->args_v3.secure_mode = val; | 247 | pmu->args_v3.secure_mode = val; |
248 | } | 248 | } |
249 | 249 | ||
250 | static void set_pmu_cmdline_args_falctracesize_v3( | 250 | static void set_pmu_cmdline_args_falctracesize_v3( |
251 | struct pmu_gk20a *pmu, u32 size) | 251 | struct nvgpu_pmu *pmu, u32 size) |
252 | { | 252 | { |
253 | pmu->args_v3.falc_trace_size = size; | 253 | pmu->args_v3.falc_trace_size = size; |
254 | } | 254 | } |
255 | 255 | ||
256 | static void set_pmu_cmdline_args_falctracedmabase_v3(struct pmu_gk20a *pmu) | 256 | static void set_pmu_cmdline_args_falctracedmabase_v3(struct nvgpu_pmu *pmu) |
257 | { | 257 | { |
258 | pmu->args_v3.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; | 258 | pmu->args_v3.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; |
259 | } | 259 | } |
260 | 260 | ||
261 | static void set_pmu_cmdline_args_falctracedmaidx_v3( | 261 | static void set_pmu_cmdline_args_falctracedmaidx_v3( |
262 | struct pmu_gk20a *pmu, u32 idx) | 262 | struct nvgpu_pmu *pmu, u32 idx) |
263 | { | 263 | { |
264 | pmu->args_v3.falc_trace_dma_idx = idx; | 264 | pmu->args_v3.falc_trace_dma_idx = idx; |
265 | } | 265 | } |
266 | 266 | ||
267 | static void set_pmu_cmdline_args_cpufreq_v1(struct pmu_gk20a *pmu, u32 freq) | 267 | static void set_pmu_cmdline_args_cpufreq_v1(struct nvgpu_pmu *pmu, u32 freq) |
268 | { | 268 | { |
269 | pmu->args_v1.cpu_freq_hz = freq; | 269 | pmu->args_v1.cpu_freq_hz = freq; |
270 | } | 270 | } |
271 | static void set_pmu_cmdline_args_secure_mode_v1(struct pmu_gk20a *pmu, u32 val) | 271 | static void set_pmu_cmdline_args_secure_mode_v1(struct nvgpu_pmu *pmu, u32 val) |
272 | { | 272 | { |
273 | pmu->args_v1.secure_mode = val; | 273 | pmu->args_v1.secure_mode = val; |
274 | } | 274 | } |
275 | 275 | ||
276 | static void set_pmu_cmdline_args_falctracesize_v1( | 276 | static void set_pmu_cmdline_args_falctracesize_v1( |
277 | struct pmu_gk20a *pmu, u32 size) | 277 | struct nvgpu_pmu *pmu, u32 size) |
278 | { | 278 | { |
279 | pmu->args_v1.falc_trace_size = size; | 279 | pmu->args_v1.falc_trace_size = size; |
280 | } | 280 | } |
@@ -293,7 +293,7 @@ bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos) | |||
293 | return false; | 293 | return false; |
294 | } | 294 | } |
295 | 295 | ||
296 | static void printtrace(struct pmu_gk20a *pmu) | 296 | static void printtrace(struct nvgpu_pmu *pmu) |
297 | { | 297 | { |
298 | u32 i = 0, j = 0, k, l, m, count; | 298 | u32 i = 0, j = 0, k, l, m, count; |
299 | char part_str[40], buf[0x40]; | 299 | char part_str[40], buf[0x40]; |
@@ -340,72 +340,72 @@ static void printtrace(struct pmu_gk20a *pmu) | |||
340 | nvgpu_kfree(g, tracebuffer); | 340 | nvgpu_kfree(g, tracebuffer); |
341 | } | 341 | } |
342 | 342 | ||
343 | static void set_pmu_cmdline_args_falctracedmabase_v1(struct pmu_gk20a *pmu) | 343 | static void set_pmu_cmdline_args_falctracedmabase_v1(struct nvgpu_pmu *pmu) |
344 | { | 344 | { |
345 | pmu->args_v1.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; | 345 | pmu->args_v1.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; |
346 | } | 346 | } |
347 | 347 | ||
348 | static void set_pmu_cmdline_args_falctracedmaidx_v1( | 348 | static void set_pmu_cmdline_args_falctracedmaidx_v1( |
349 | struct pmu_gk20a *pmu, u32 idx) | 349 | struct nvgpu_pmu *pmu, u32 idx) |
350 | { | 350 | { |
351 | pmu->args_v1.falc_trace_dma_idx = idx; | 351 | pmu->args_v1.falc_trace_dma_idx = idx; |
352 | } | 352 | } |
353 | 353 | ||
354 | static void set_pmu_cmdline_args_cpufreq_v0(struct pmu_gk20a *pmu, u32 freq) | 354 | static void set_pmu_cmdline_args_cpufreq_v0(struct nvgpu_pmu *pmu, u32 freq) |
355 | { | 355 | { |
356 | pmu->args_v0.cpu_freq_hz = freq; | 356 | pmu->args_v0.cpu_freq_hz = freq; |
357 | } | 357 | } |
358 | 358 | ||
359 | static void *get_pmu_cmdline_args_ptr_v4(struct pmu_gk20a *pmu) | 359 | static void *get_pmu_cmdline_args_ptr_v4(struct nvgpu_pmu *pmu) |
360 | { | 360 | { |
361 | return (void *)(&pmu->args_v4); | 361 | return (void *)(&pmu->args_v4); |
362 | } | 362 | } |
363 | 363 | ||
364 | static void *get_pmu_cmdline_args_ptr_v3(struct pmu_gk20a *pmu) | 364 | static void *get_pmu_cmdline_args_ptr_v3(struct nvgpu_pmu *pmu) |
365 | { | 365 | { |
366 | return (void *)(&pmu->args_v3); | 366 | return (void *)(&pmu->args_v3); |
367 | } | 367 | } |
368 | 368 | ||
369 | static void *get_pmu_cmdline_args_ptr_v2(struct pmu_gk20a *pmu) | 369 | static void *get_pmu_cmdline_args_ptr_v2(struct nvgpu_pmu *pmu) |
370 | { | 370 | { |
371 | return (void *)(&pmu->args_v2); | 371 | return (void *)(&pmu->args_v2); |
372 | } | 372 | } |
373 | 373 | ||
374 | static void *get_pmu_cmdline_args_ptr_v5(struct pmu_gk20a *pmu) | 374 | static void *get_pmu_cmdline_args_ptr_v5(struct nvgpu_pmu *pmu) |
375 | { | 375 | { |
376 | return (void *)(&pmu->args_v5); | 376 | return (void *)(&pmu->args_v5); |
377 | } | 377 | } |
378 | static void *get_pmu_cmdline_args_ptr_v1(struct pmu_gk20a *pmu) | 378 | static void *get_pmu_cmdline_args_ptr_v1(struct nvgpu_pmu *pmu) |
379 | { | 379 | { |
380 | return (void *)(&pmu->args_v1); | 380 | return (void *)(&pmu->args_v1); |
381 | } | 381 | } |
382 | 382 | ||
383 | static void *get_pmu_cmdline_args_ptr_v0(struct pmu_gk20a *pmu) | 383 | static void *get_pmu_cmdline_args_ptr_v0(struct nvgpu_pmu *pmu) |
384 | { | 384 | { |
385 | return (void *)(&pmu->args_v0); | 385 | return (void *)(&pmu->args_v0); |
386 | } | 386 | } |
387 | 387 | ||
388 | static u32 get_pmu_allocation_size_v3(struct pmu_gk20a *pmu) | 388 | static u32 get_pmu_allocation_size_v3(struct nvgpu_pmu *pmu) |
389 | { | 389 | { |
390 | return sizeof(struct pmu_allocation_v3); | 390 | return sizeof(struct pmu_allocation_v3); |
391 | } | 391 | } |
392 | 392 | ||
393 | static u32 get_pmu_allocation_size_v2(struct pmu_gk20a *pmu) | 393 | static u32 get_pmu_allocation_size_v2(struct nvgpu_pmu *pmu) |
394 | { | 394 | { |
395 | return sizeof(struct pmu_allocation_v2); | 395 | return sizeof(struct pmu_allocation_v2); |
396 | } | 396 | } |
397 | 397 | ||
398 | static u32 get_pmu_allocation_size_v1(struct pmu_gk20a *pmu) | 398 | static u32 get_pmu_allocation_size_v1(struct nvgpu_pmu *pmu) |
399 | { | 399 | { |
400 | return sizeof(struct pmu_allocation_v1); | 400 | return sizeof(struct pmu_allocation_v1); |
401 | } | 401 | } |
402 | 402 | ||
403 | static u32 get_pmu_allocation_size_v0(struct pmu_gk20a *pmu) | 403 | static u32 get_pmu_allocation_size_v0(struct nvgpu_pmu *pmu) |
404 | { | 404 | { |
405 | return sizeof(struct pmu_allocation_v0); | 405 | return sizeof(struct pmu_allocation_v0); |
406 | } | 406 | } |
407 | 407 | ||
408 | static void set_pmu_allocation_ptr_v3(struct pmu_gk20a *pmu, | 408 | static void set_pmu_allocation_ptr_v3(struct nvgpu_pmu *pmu, |
409 | void **pmu_alloc_ptr, void *assign_ptr) | 409 | void **pmu_alloc_ptr, void *assign_ptr) |
410 | { | 410 | { |
411 | struct pmu_allocation_v3 **pmu_a_ptr = | 411 | struct pmu_allocation_v3 **pmu_a_ptr = |
@@ -413,7 +413,7 @@ static void set_pmu_allocation_ptr_v3(struct pmu_gk20a *pmu, | |||
413 | *pmu_a_ptr = (struct pmu_allocation_v3 *)assign_ptr; | 413 | *pmu_a_ptr = (struct pmu_allocation_v3 *)assign_ptr; |
414 | } | 414 | } |
415 | 415 | ||
416 | static void set_pmu_allocation_ptr_v2(struct pmu_gk20a *pmu, | 416 | static void set_pmu_allocation_ptr_v2(struct nvgpu_pmu *pmu, |
417 | void **pmu_alloc_ptr, void *assign_ptr) | 417 | void **pmu_alloc_ptr, void *assign_ptr) |
418 | { | 418 | { |
419 | struct pmu_allocation_v2 **pmu_a_ptr = | 419 | struct pmu_allocation_v2 **pmu_a_ptr = |
@@ -421,7 +421,7 @@ static void set_pmu_allocation_ptr_v2(struct pmu_gk20a *pmu, | |||
421 | *pmu_a_ptr = (struct pmu_allocation_v2 *)assign_ptr; | 421 | *pmu_a_ptr = (struct pmu_allocation_v2 *)assign_ptr; |
422 | } | 422 | } |
423 | 423 | ||
424 | static void set_pmu_allocation_ptr_v1(struct pmu_gk20a *pmu, | 424 | static void set_pmu_allocation_ptr_v1(struct nvgpu_pmu *pmu, |
425 | void **pmu_alloc_ptr, void *assign_ptr) | 425 | void **pmu_alloc_ptr, void *assign_ptr) |
426 | { | 426 | { |
427 | struct pmu_allocation_v1 **pmu_a_ptr = | 427 | struct pmu_allocation_v1 **pmu_a_ptr = |
@@ -429,7 +429,7 @@ static void set_pmu_allocation_ptr_v1(struct pmu_gk20a *pmu, | |||
429 | *pmu_a_ptr = (struct pmu_allocation_v1 *)assign_ptr; | 429 | *pmu_a_ptr = (struct pmu_allocation_v1 *)assign_ptr; |
430 | } | 430 | } |
431 | 431 | ||
432 | static void set_pmu_allocation_ptr_v0(struct pmu_gk20a *pmu, | 432 | static void set_pmu_allocation_ptr_v0(struct nvgpu_pmu *pmu, |
433 | void **pmu_alloc_ptr, void *assign_ptr) | 433 | void **pmu_alloc_ptr, void *assign_ptr) |
434 | { | 434 | { |
435 | struct pmu_allocation_v0 **pmu_a_ptr = | 435 | struct pmu_allocation_v0 **pmu_a_ptr = |
@@ -437,7 +437,7 @@ static void set_pmu_allocation_ptr_v0(struct pmu_gk20a *pmu, | |||
437 | *pmu_a_ptr = (struct pmu_allocation_v0 *)assign_ptr; | 437 | *pmu_a_ptr = (struct pmu_allocation_v0 *)assign_ptr; |
438 | } | 438 | } |
439 | 439 | ||
440 | static void pmu_allocation_set_dmem_size_v3(struct pmu_gk20a *pmu, | 440 | static void pmu_allocation_set_dmem_size_v3(struct nvgpu_pmu *pmu, |
441 | void *pmu_alloc_ptr, u16 size) | 441 | void *pmu_alloc_ptr, u16 size) |
442 | { | 442 | { |
443 | struct pmu_allocation_v3 *pmu_a_ptr = | 443 | struct pmu_allocation_v3 *pmu_a_ptr = |
@@ -445,7 +445,7 @@ static void pmu_allocation_set_dmem_size_v3(struct pmu_gk20a *pmu, | |||
445 | pmu_a_ptr->alloc.dmem.size = size; | 445 | pmu_a_ptr->alloc.dmem.size = size; |
446 | } | 446 | } |
447 | 447 | ||
448 | static void pmu_allocation_set_dmem_size_v2(struct pmu_gk20a *pmu, | 448 | static void pmu_allocation_set_dmem_size_v2(struct nvgpu_pmu *pmu, |
449 | void *pmu_alloc_ptr, u16 size) | 449 | void *pmu_alloc_ptr, u16 size) |
450 | { | 450 | { |
451 | struct pmu_allocation_v2 *pmu_a_ptr = | 451 | struct pmu_allocation_v2 *pmu_a_ptr = |
@@ -453,7 +453,7 @@ static void pmu_allocation_set_dmem_size_v2(struct pmu_gk20a *pmu, | |||
453 | pmu_a_ptr->alloc.dmem.size = size; | 453 | pmu_a_ptr->alloc.dmem.size = size; |
454 | } | 454 | } |
455 | 455 | ||
456 | static void pmu_allocation_set_dmem_size_v1(struct pmu_gk20a *pmu, | 456 | static void pmu_allocation_set_dmem_size_v1(struct nvgpu_pmu *pmu, |
457 | void *pmu_alloc_ptr, u16 size) | 457 | void *pmu_alloc_ptr, u16 size) |
458 | { | 458 | { |
459 | struct pmu_allocation_v1 *pmu_a_ptr = | 459 | struct pmu_allocation_v1 *pmu_a_ptr = |
@@ -461,7 +461,7 @@ static void pmu_allocation_set_dmem_size_v1(struct pmu_gk20a *pmu, | |||
461 | pmu_a_ptr->alloc.dmem.size = size; | 461 | pmu_a_ptr->alloc.dmem.size = size; |
462 | } | 462 | } |
463 | 463 | ||
464 | static void pmu_allocation_set_dmem_size_v0(struct pmu_gk20a *pmu, | 464 | static void pmu_allocation_set_dmem_size_v0(struct nvgpu_pmu *pmu, |
465 | void *pmu_alloc_ptr, u16 size) | 465 | void *pmu_alloc_ptr, u16 size) |
466 | { | 466 | { |
467 | struct pmu_allocation_v0 *pmu_a_ptr = | 467 | struct pmu_allocation_v0 *pmu_a_ptr = |
@@ -469,7 +469,7 @@ static void pmu_allocation_set_dmem_size_v0(struct pmu_gk20a *pmu, | |||
469 | pmu_a_ptr->alloc.dmem.size = size; | 469 | pmu_a_ptr->alloc.dmem.size = size; |
470 | } | 470 | } |
471 | 471 | ||
472 | static u16 pmu_allocation_get_dmem_size_v3(struct pmu_gk20a *pmu, | 472 | static u16 pmu_allocation_get_dmem_size_v3(struct nvgpu_pmu *pmu, |
473 | void *pmu_alloc_ptr) | 473 | void *pmu_alloc_ptr) |
474 | { | 474 | { |
475 | struct pmu_allocation_v3 *pmu_a_ptr = | 475 | struct pmu_allocation_v3 *pmu_a_ptr = |
@@ -477,7 +477,7 @@ static u16 pmu_allocation_get_dmem_size_v3(struct pmu_gk20a *pmu, | |||
477 | return pmu_a_ptr->alloc.dmem.size; | 477 | return pmu_a_ptr->alloc.dmem.size; |
478 | } | 478 | } |
479 | 479 | ||
480 | static u16 pmu_allocation_get_dmem_size_v2(struct pmu_gk20a *pmu, | 480 | static u16 pmu_allocation_get_dmem_size_v2(struct nvgpu_pmu *pmu, |
481 | void *pmu_alloc_ptr) | 481 | void *pmu_alloc_ptr) |
482 | { | 482 | { |
483 | struct pmu_allocation_v2 *pmu_a_ptr = | 483 | struct pmu_allocation_v2 *pmu_a_ptr = |
@@ -485,7 +485,7 @@ static u16 pmu_allocation_get_dmem_size_v2(struct pmu_gk20a *pmu, | |||
485 | return pmu_a_ptr->alloc.dmem.size; | 485 | return pmu_a_ptr->alloc.dmem.size; |
486 | } | 486 | } |
487 | 487 | ||
488 | static u16 pmu_allocation_get_dmem_size_v1(struct pmu_gk20a *pmu, | 488 | static u16 pmu_allocation_get_dmem_size_v1(struct nvgpu_pmu *pmu, |
489 | void *pmu_alloc_ptr) | 489 | void *pmu_alloc_ptr) |
490 | { | 490 | { |
491 | struct pmu_allocation_v1 *pmu_a_ptr = | 491 | struct pmu_allocation_v1 *pmu_a_ptr = |
@@ -493,7 +493,7 @@ static u16 pmu_allocation_get_dmem_size_v1(struct pmu_gk20a *pmu, | |||
493 | return pmu_a_ptr->alloc.dmem.size; | 493 | return pmu_a_ptr->alloc.dmem.size; |
494 | } | 494 | } |
495 | 495 | ||
496 | static u16 pmu_allocation_get_dmem_size_v0(struct pmu_gk20a *pmu, | 496 | static u16 pmu_allocation_get_dmem_size_v0(struct nvgpu_pmu *pmu, |
497 | void *pmu_alloc_ptr) | 497 | void *pmu_alloc_ptr) |
498 | { | 498 | { |
499 | struct pmu_allocation_v0 *pmu_a_ptr = | 499 | struct pmu_allocation_v0 *pmu_a_ptr = |
@@ -501,7 +501,7 @@ static u16 pmu_allocation_get_dmem_size_v0(struct pmu_gk20a *pmu, | |||
501 | return pmu_a_ptr->alloc.dmem.size; | 501 | return pmu_a_ptr->alloc.dmem.size; |
502 | } | 502 | } |
503 | 503 | ||
504 | static u32 pmu_allocation_get_dmem_offset_v3(struct pmu_gk20a *pmu, | 504 | static u32 pmu_allocation_get_dmem_offset_v3(struct nvgpu_pmu *pmu, |
505 | void *pmu_alloc_ptr) | 505 | void *pmu_alloc_ptr) |
506 | { | 506 | { |
507 | struct pmu_allocation_v3 *pmu_a_ptr = | 507 | struct pmu_allocation_v3 *pmu_a_ptr = |
@@ -509,7 +509,7 @@ static u32 pmu_allocation_get_dmem_offset_v3(struct pmu_gk20a *pmu, | |||
509 | return pmu_a_ptr->alloc.dmem.offset; | 509 | return pmu_a_ptr->alloc.dmem.offset; |
510 | } | 510 | } |
511 | 511 | ||
512 | static u32 pmu_allocation_get_dmem_offset_v2(struct pmu_gk20a *pmu, | 512 | static u32 pmu_allocation_get_dmem_offset_v2(struct nvgpu_pmu *pmu, |
513 | void *pmu_alloc_ptr) | 513 | void *pmu_alloc_ptr) |
514 | { | 514 | { |
515 | struct pmu_allocation_v2 *pmu_a_ptr = | 515 | struct pmu_allocation_v2 *pmu_a_ptr = |
@@ -517,7 +517,7 @@ static u32 pmu_allocation_get_dmem_offset_v2(struct pmu_gk20a *pmu, | |||
517 | return pmu_a_ptr->alloc.dmem.offset; | 517 | return pmu_a_ptr->alloc.dmem.offset; |
518 | } | 518 | } |
519 | 519 | ||
520 | static u32 pmu_allocation_get_dmem_offset_v1(struct pmu_gk20a *pmu, | 520 | static u32 pmu_allocation_get_dmem_offset_v1(struct nvgpu_pmu *pmu, |
521 | void *pmu_alloc_ptr) | 521 | void *pmu_alloc_ptr) |
522 | { | 522 | { |
523 | struct pmu_allocation_v1 *pmu_a_ptr = | 523 | struct pmu_allocation_v1 *pmu_a_ptr = |
@@ -525,7 +525,7 @@ static u32 pmu_allocation_get_dmem_offset_v1(struct pmu_gk20a *pmu, | |||
525 | return pmu_a_ptr->alloc.dmem.offset; | 525 | return pmu_a_ptr->alloc.dmem.offset; |
526 | } | 526 | } |
527 | 527 | ||
528 | static u32 pmu_allocation_get_dmem_offset_v0(struct pmu_gk20a *pmu, | 528 | static u32 pmu_allocation_get_dmem_offset_v0(struct nvgpu_pmu *pmu, |
529 | void *pmu_alloc_ptr) | 529 | void *pmu_alloc_ptr) |
530 | { | 530 | { |
531 | struct pmu_allocation_v0 *pmu_a_ptr = | 531 | struct pmu_allocation_v0 *pmu_a_ptr = |
@@ -533,7 +533,7 @@ static u32 pmu_allocation_get_dmem_offset_v0(struct pmu_gk20a *pmu, | |||
533 | return pmu_a_ptr->alloc.dmem.offset; | 533 | return pmu_a_ptr->alloc.dmem.offset; |
534 | } | 534 | } |
535 | 535 | ||
536 | static u32 *pmu_allocation_get_dmem_offset_addr_v3(struct pmu_gk20a *pmu, | 536 | static u32 *pmu_allocation_get_dmem_offset_addr_v3(struct nvgpu_pmu *pmu, |
537 | void *pmu_alloc_ptr) | 537 | void *pmu_alloc_ptr) |
538 | { | 538 | { |
539 | struct pmu_allocation_v3 *pmu_a_ptr = | 539 | struct pmu_allocation_v3 *pmu_a_ptr = |
@@ -542,7 +542,7 @@ static u32 *pmu_allocation_get_dmem_offset_addr_v3(struct pmu_gk20a *pmu, | |||
542 | } | 542 | } |
543 | 543 | ||
544 | static void *pmu_allocation_get_fb_addr_v3( | 544 | static void *pmu_allocation_get_fb_addr_v3( |
545 | struct pmu_gk20a *pmu, void *pmu_alloc_ptr) | 545 | struct nvgpu_pmu *pmu, void *pmu_alloc_ptr) |
546 | { | 546 | { |
547 | struct pmu_allocation_v3 *pmu_a_ptr = | 547 | struct pmu_allocation_v3 *pmu_a_ptr = |
548 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; | 548 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; |
@@ -550,14 +550,14 @@ static void *pmu_allocation_get_fb_addr_v3( | |||
550 | } | 550 | } |
551 | 551 | ||
552 | static u32 pmu_allocation_get_fb_size_v3( | 552 | static u32 pmu_allocation_get_fb_size_v3( |
553 | struct pmu_gk20a *pmu, void *pmu_alloc_ptr) | 553 | struct nvgpu_pmu *pmu, void *pmu_alloc_ptr) |
554 | { | 554 | { |
555 | struct pmu_allocation_v3 *pmu_a_ptr = | 555 | struct pmu_allocation_v3 *pmu_a_ptr = |
556 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; | 556 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; |
557 | return sizeof(pmu_a_ptr->alloc.fb); | 557 | return sizeof(pmu_a_ptr->alloc.fb); |
558 | } | 558 | } |
559 | 559 | ||
560 | static u32 *pmu_allocation_get_dmem_offset_addr_v2(struct pmu_gk20a *pmu, | 560 | static u32 *pmu_allocation_get_dmem_offset_addr_v2(struct nvgpu_pmu *pmu, |
561 | void *pmu_alloc_ptr) | 561 | void *pmu_alloc_ptr) |
562 | { | 562 | { |
563 | struct pmu_allocation_v2 *pmu_a_ptr = | 563 | struct pmu_allocation_v2 *pmu_a_ptr = |
@@ -565,7 +565,7 @@ static u32 *pmu_allocation_get_dmem_offset_addr_v2(struct pmu_gk20a *pmu, | |||
565 | return &pmu_a_ptr->alloc.dmem.offset; | 565 | return &pmu_a_ptr->alloc.dmem.offset; |
566 | } | 566 | } |
567 | 567 | ||
568 | static u32 *pmu_allocation_get_dmem_offset_addr_v1(struct pmu_gk20a *pmu, | 568 | static u32 *pmu_allocation_get_dmem_offset_addr_v1(struct nvgpu_pmu *pmu, |
569 | void *pmu_alloc_ptr) | 569 | void *pmu_alloc_ptr) |
570 | { | 570 | { |
571 | struct pmu_allocation_v1 *pmu_a_ptr = | 571 | struct pmu_allocation_v1 *pmu_a_ptr = |
@@ -573,7 +573,7 @@ static u32 *pmu_allocation_get_dmem_offset_addr_v1(struct pmu_gk20a *pmu, | |||
573 | return &pmu_a_ptr->alloc.dmem.offset; | 573 | return &pmu_a_ptr->alloc.dmem.offset; |
574 | } | 574 | } |
575 | 575 | ||
576 | static u32 *pmu_allocation_get_dmem_offset_addr_v0(struct pmu_gk20a *pmu, | 576 | static u32 *pmu_allocation_get_dmem_offset_addr_v0(struct nvgpu_pmu *pmu, |
577 | void *pmu_alloc_ptr) | 577 | void *pmu_alloc_ptr) |
578 | { | 578 | { |
579 | struct pmu_allocation_v0 *pmu_a_ptr = | 579 | struct pmu_allocation_v0 *pmu_a_ptr = |
@@ -581,7 +581,7 @@ static u32 *pmu_allocation_get_dmem_offset_addr_v0(struct pmu_gk20a *pmu, | |||
581 | return &pmu_a_ptr->alloc.dmem.offset; | 581 | return &pmu_a_ptr->alloc.dmem.offset; |
582 | } | 582 | } |
583 | 583 | ||
584 | static void pmu_allocation_set_dmem_offset_v3(struct pmu_gk20a *pmu, | 584 | static void pmu_allocation_set_dmem_offset_v3(struct nvgpu_pmu *pmu, |
585 | void *pmu_alloc_ptr, u32 offset) | 585 | void *pmu_alloc_ptr, u32 offset) |
586 | { | 586 | { |
587 | struct pmu_allocation_v3 *pmu_a_ptr = | 587 | struct pmu_allocation_v3 *pmu_a_ptr = |
@@ -589,7 +589,7 @@ static void pmu_allocation_set_dmem_offset_v3(struct pmu_gk20a *pmu, | |||
589 | pmu_a_ptr->alloc.dmem.offset = offset; | 589 | pmu_a_ptr->alloc.dmem.offset = offset; |
590 | } | 590 | } |
591 | 591 | ||
592 | static void pmu_allocation_set_dmem_offset_v2(struct pmu_gk20a *pmu, | 592 | static void pmu_allocation_set_dmem_offset_v2(struct nvgpu_pmu *pmu, |
593 | void *pmu_alloc_ptr, u32 offset) | 593 | void *pmu_alloc_ptr, u32 offset) |
594 | { | 594 | { |
595 | struct pmu_allocation_v2 *pmu_a_ptr = | 595 | struct pmu_allocation_v2 *pmu_a_ptr = |
@@ -597,7 +597,7 @@ static void pmu_allocation_set_dmem_offset_v2(struct pmu_gk20a *pmu, | |||
597 | pmu_a_ptr->alloc.dmem.offset = offset; | 597 | pmu_a_ptr->alloc.dmem.offset = offset; |
598 | } | 598 | } |
599 | 599 | ||
600 | static void pmu_allocation_set_dmem_offset_v1(struct pmu_gk20a *pmu, | 600 | static void pmu_allocation_set_dmem_offset_v1(struct nvgpu_pmu *pmu, |
601 | void *pmu_alloc_ptr, u32 offset) | 601 | void *pmu_alloc_ptr, u32 offset) |
602 | { | 602 | { |
603 | struct pmu_allocation_v1 *pmu_a_ptr = | 603 | struct pmu_allocation_v1 *pmu_a_ptr = |
@@ -605,7 +605,7 @@ static void pmu_allocation_set_dmem_offset_v1(struct pmu_gk20a *pmu, | |||
605 | pmu_a_ptr->alloc.dmem.offset = offset; | 605 | pmu_a_ptr->alloc.dmem.offset = offset; |
606 | } | 606 | } |
607 | 607 | ||
608 | static void pmu_allocation_set_dmem_offset_v0(struct pmu_gk20a *pmu, | 608 | static void pmu_allocation_set_dmem_offset_v0(struct nvgpu_pmu *pmu, |
609 | void *pmu_alloc_ptr, u32 offset) | 609 | void *pmu_alloc_ptr, u32 offset) |
610 | { | 610 | { |
611 | struct pmu_allocation_v0 *pmu_a_ptr = | 611 | struct pmu_allocation_v0 *pmu_a_ptr = |
@@ -1421,7 +1421,7 @@ static void pg_cmd_eng_buf_load_set_dma_idx_v2(struct pmu_pg_cmd *pg, | |||
1421 | pg->eng_buf_load_v2.dma_desc.params |= (value << 24); | 1421 | pg->eng_buf_load_v2.dma_desc.params |= (value << 24); |
1422 | } | 1422 | } |
1423 | 1423 | ||
1424 | int gk20a_init_pmu(struct pmu_gk20a *pmu) | 1424 | int gk20a_init_pmu(struct nvgpu_pmu *pmu) |
1425 | { | 1425 | { |
1426 | struct gk20a *g = gk20a_from_pmu(pmu); | 1426 | struct gk20a *g = gk20a_from_pmu(pmu); |
1427 | struct pmu_v *pv = &g->ops.pmu_ver; | 1427 | struct pmu_v *pv = &g->ops.pmu_ver; |
@@ -2214,7 +2214,7 @@ fail_elpg: | |||
2214 | return err; | 2214 | return err; |
2215 | } | 2215 | } |
2216 | 2216 | ||
2217 | void pmu_copy_from_dmem(struct pmu_gk20a *pmu, | 2217 | void pmu_copy_from_dmem(struct nvgpu_pmu *pmu, |
2218 | u32 src, u8 *dst, u32 size, u8 port) | 2218 | u32 src, u8 *dst, u32 size, u8 port) |
2219 | { | 2219 | { |
2220 | struct gk20a *g = gk20a_from_pmu(pmu); | 2220 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -2258,7 +2258,7 @@ void pmu_copy_from_dmem(struct pmu_gk20a *pmu, | |||
2258 | return; | 2258 | return; |
2259 | } | 2259 | } |
2260 | 2260 | ||
2261 | void pmu_copy_to_dmem(struct pmu_gk20a *pmu, | 2261 | void pmu_copy_to_dmem(struct nvgpu_pmu *pmu, |
2262 | u32 dst, u8 *src, u32 size, u8 port) | 2262 | u32 dst, u8 *src, u32 size, u8 port) |
2263 | { | 2263 | { |
2264 | struct gk20a *g = gk20a_from_pmu(pmu); | 2264 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -2309,7 +2309,7 @@ void pmu_copy_to_dmem(struct pmu_gk20a *pmu, | |||
2309 | return; | 2309 | return; |
2310 | } | 2310 | } |
2311 | 2311 | ||
2312 | int pmu_idle(struct pmu_gk20a *pmu) | 2312 | int pmu_idle(struct nvgpu_pmu *pmu) |
2313 | { | 2313 | { |
2314 | struct gk20a *g = gk20a_from_pmu(pmu); | 2314 | struct gk20a *g = gk20a_from_pmu(pmu); |
2315 | struct nvgpu_timeout timeout; | 2315 | struct nvgpu_timeout timeout; |
@@ -2338,7 +2338,7 @@ int pmu_idle(struct pmu_gk20a *pmu) | |||
2338 | return 0; | 2338 | return 0; |
2339 | } | 2339 | } |
2340 | 2340 | ||
2341 | void pmu_enable_irq(struct pmu_gk20a *pmu, bool enable) | 2341 | void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable) |
2342 | { | 2342 | { |
2343 | struct gk20a *g = gk20a_from_pmu(pmu); | 2343 | struct gk20a *g = gk20a_from_pmu(pmu); |
2344 | 2344 | ||
@@ -2400,7 +2400,7 @@ void pmu_enable_irq(struct pmu_gk20a *pmu, bool enable) | |||
2400 | gk20a_dbg_fn("done"); | 2400 | gk20a_dbg_fn("done"); |
2401 | } | 2401 | } |
2402 | 2402 | ||
2403 | int pmu_enable_hw(struct pmu_gk20a *pmu, bool enable) | 2403 | int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable) |
2404 | { | 2404 | { |
2405 | struct gk20a *g = gk20a_from_pmu(pmu); | 2405 | struct gk20a *g = gk20a_from_pmu(pmu); |
2406 | struct nvgpu_timeout timeout; | 2406 | struct nvgpu_timeout timeout; |
@@ -2443,7 +2443,7 @@ int pmu_enable_hw(struct pmu_gk20a *pmu, bool enable) | |||
2443 | } | 2443 | } |
2444 | } | 2444 | } |
2445 | 2445 | ||
2446 | static int pmu_enable(struct pmu_gk20a *pmu, bool enable) | 2446 | static int pmu_enable(struct nvgpu_pmu *pmu, bool enable) |
2447 | { | 2447 | { |
2448 | struct gk20a *g = gk20a_from_pmu(pmu); | 2448 | struct gk20a *g = gk20a_from_pmu(pmu); |
2449 | u32 pmc_enable; | 2449 | u32 pmc_enable; |
@@ -2477,7 +2477,7 @@ static int pmu_enable(struct pmu_gk20a *pmu, bool enable) | |||
2477 | return 0; | 2477 | return 0; |
2478 | } | 2478 | } |
2479 | 2479 | ||
2480 | int pmu_reset(struct pmu_gk20a *pmu) | 2480 | int pmu_reset(struct nvgpu_pmu *pmu) |
2481 | { | 2481 | { |
2482 | int err; | 2482 | int err; |
2483 | 2483 | ||
@@ -2502,7 +2502,7 @@ int pmu_reset(struct pmu_gk20a *pmu) | |||
2502 | return 0; | 2502 | return 0; |
2503 | } | 2503 | } |
2504 | 2504 | ||
2505 | int pmu_bootstrap(struct pmu_gk20a *pmu) | 2505 | int pmu_bootstrap(struct nvgpu_pmu *pmu) |
2506 | { | 2506 | { |
2507 | struct gk20a *g = gk20a_from_pmu(pmu); | 2507 | struct gk20a *g = gk20a_from_pmu(pmu); |
2508 | struct mm_gk20a *mm = &g->mm; | 2508 | struct mm_gk20a *mm = &g->mm; |
@@ -2593,7 +2593,7 @@ int pmu_bootstrap(struct pmu_gk20a *pmu) | |||
2593 | return 0; | 2593 | return 0; |
2594 | } | 2594 | } |
2595 | 2595 | ||
2596 | void pmu_seq_init(struct pmu_gk20a *pmu) | 2596 | void pmu_seq_init(struct nvgpu_pmu *pmu) |
2597 | { | 2597 | { |
2598 | u32 i; | 2598 | u32 i; |
2599 | 2599 | ||
@@ -2606,7 +2606,7 @@ void pmu_seq_init(struct pmu_gk20a *pmu) | |||
2606 | pmu->seq[i].id = i; | 2606 | pmu->seq[i].id = i; |
2607 | } | 2607 | } |
2608 | 2608 | ||
2609 | static int pmu_seq_acquire(struct pmu_gk20a *pmu, | 2609 | static int pmu_seq_acquire(struct nvgpu_pmu *pmu, |
2610 | struct pmu_sequence **pseq) | 2610 | struct pmu_sequence **pseq) |
2611 | { | 2611 | { |
2612 | struct gk20a *g = gk20a_from_pmu(pmu); | 2612 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -2631,7 +2631,7 @@ static int pmu_seq_acquire(struct pmu_gk20a *pmu, | |||
2631 | return 0; | 2631 | return 0; |
2632 | } | 2632 | } |
2633 | 2633 | ||
2634 | static void pmu_seq_release(struct pmu_gk20a *pmu, | 2634 | static void pmu_seq_release(struct nvgpu_pmu *pmu, |
2635 | struct pmu_sequence *seq) | 2635 | struct pmu_sequence *seq) |
2636 | { | 2636 | { |
2637 | struct gk20a *g = gk20a_from_pmu(pmu); | 2637 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -2649,7 +2649,7 @@ static void pmu_seq_release(struct pmu_gk20a *pmu, | |||
2649 | clear_bit(seq->id, pmu->pmu_seq_tbl); | 2649 | clear_bit(seq->id, pmu->pmu_seq_tbl); |
2650 | } | 2650 | } |
2651 | 2651 | ||
2652 | static int pmu_queue_init(struct pmu_gk20a *pmu, | 2652 | static int pmu_queue_init(struct nvgpu_pmu *pmu, |
2653 | u32 id, union pmu_init_msg_pmu *init) | 2653 | u32 id, union pmu_init_msg_pmu *init) |
2654 | { | 2654 | { |
2655 | struct gk20a *g = gk20a_from_pmu(pmu); | 2655 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -2670,7 +2670,7 @@ static int pmu_queue_init(struct pmu_gk20a *pmu, | |||
2670 | return 0; | 2670 | return 0; |
2671 | } | 2671 | } |
2672 | 2672 | ||
2673 | static int pmu_queue_head(struct pmu_gk20a *pmu, struct pmu_queue *queue, | 2673 | static int pmu_queue_head(struct nvgpu_pmu *pmu, struct pmu_queue *queue, |
2674 | u32 *head, bool set) | 2674 | u32 *head, bool set) |
2675 | { | 2675 | { |
2676 | struct gk20a *g = gk20a_from_pmu(pmu); | 2676 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -2707,7 +2707,7 @@ static int pmu_queue_head(struct pmu_gk20a *pmu, struct pmu_queue *queue, | |||
2707 | return 0; | 2707 | return 0; |
2708 | } | 2708 | } |
2709 | 2709 | ||
2710 | static int pmu_queue_tail(struct pmu_gk20a *pmu, struct pmu_queue *queue, | 2710 | static int pmu_queue_tail(struct nvgpu_pmu *pmu, struct pmu_queue *queue, |
2711 | u32 *tail, bool set) | 2711 | u32 *tail, bool set) |
2712 | { | 2712 | { |
2713 | struct gk20a *g = gk20a_from_pmu(pmu); | 2713 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -2745,19 +2745,19 @@ static int pmu_queue_tail(struct pmu_gk20a *pmu, struct pmu_queue *queue, | |||
2745 | return 0; | 2745 | return 0; |
2746 | } | 2746 | } |
2747 | 2747 | ||
2748 | static inline void pmu_queue_read(struct pmu_gk20a *pmu, | 2748 | static inline void pmu_queue_read(struct nvgpu_pmu *pmu, |
2749 | u32 offset, u8 *dst, u32 size) | 2749 | u32 offset, u8 *dst, u32 size) |
2750 | { | 2750 | { |
2751 | pmu_copy_from_dmem(pmu, offset, dst, size, 0); | 2751 | pmu_copy_from_dmem(pmu, offset, dst, size, 0); |
2752 | } | 2752 | } |
2753 | 2753 | ||
2754 | static inline void pmu_queue_write(struct pmu_gk20a *pmu, | 2754 | static inline void pmu_queue_write(struct nvgpu_pmu *pmu, |
2755 | u32 offset, u8 *src, u32 size) | 2755 | u32 offset, u8 *src, u32 size) |
2756 | { | 2756 | { |
2757 | pmu_copy_to_dmem(pmu, offset, src, size, 0); | 2757 | pmu_copy_to_dmem(pmu, offset, src, size, 0); |
2758 | } | 2758 | } |
2759 | 2759 | ||
2760 | int pmu_mutex_acquire(struct pmu_gk20a *pmu, u32 id, u32 *token) | 2760 | int pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token) |
2761 | { | 2761 | { |
2762 | struct gk20a *g = gk20a_from_pmu(pmu); | 2762 | struct gk20a *g = gk20a_from_pmu(pmu); |
2763 | struct pmu_mutex *mutex; | 2763 | struct pmu_mutex *mutex; |
@@ -2826,7 +2826,7 @@ int pmu_mutex_acquire(struct pmu_gk20a *pmu, u32 id, u32 *token) | |||
2826 | return -EBUSY; | 2826 | return -EBUSY; |
2827 | } | 2827 | } |
2828 | 2828 | ||
2829 | int pmu_mutex_release(struct pmu_gk20a *pmu, u32 id, u32 *token) | 2829 | int pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token) |
2830 | { | 2830 | { |
2831 | struct gk20a *g = gk20a_from_pmu(pmu); | 2831 | struct gk20a *g = gk20a_from_pmu(pmu); |
2832 | struct pmu_mutex *mutex; | 2832 | struct pmu_mutex *mutex; |
@@ -2867,7 +2867,7 @@ int pmu_mutex_release(struct pmu_gk20a *pmu, u32 id, u32 *token) | |||
2867 | return 0; | 2867 | return 0; |
2868 | } | 2868 | } |
2869 | 2869 | ||
2870 | static int pmu_queue_lock(struct pmu_gk20a *pmu, | 2870 | static int pmu_queue_lock(struct nvgpu_pmu *pmu, |
2871 | struct pmu_queue *queue) | 2871 | struct pmu_queue *queue) |
2872 | { | 2872 | { |
2873 | int err; | 2873 | int err; |
@@ -2884,7 +2884,7 @@ static int pmu_queue_lock(struct pmu_gk20a *pmu, | |||
2884 | return err; | 2884 | return err; |
2885 | } | 2885 | } |
2886 | 2886 | ||
2887 | static int pmu_queue_unlock(struct pmu_gk20a *pmu, | 2887 | static int pmu_queue_unlock(struct nvgpu_pmu *pmu, |
2888 | struct pmu_queue *queue) | 2888 | struct pmu_queue *queue) |
2889 | { | 2889 | { |
2890 | int err; | 2890 | int err; |
@@ -2902,7 +2902,7 @@ static int pmu_queue_unlock(struct pmu_gk20a *pmu, | |||
2902 | } | 2902 | } |
2903 | 2903 | ||
2904 | /* called by pmu_read_message, no lock */ | 2904 | /* called by pmu_read_message, no lock */ |
2905 | static bool pmu_queue_is_empty(struct pmu_gk20a *pmu, | 2905 | static bool pmu_queue_is_empty(struct nvgpu_pmu *pmu, |
2906 | struct pmu_queue *queue) | 2906 | struct pmu_queue *queue) |
2907 | { | 2907 | { |
2908 | u32 head, tail; | 2908 | u32 head, tail; |
@@ -2916,7 +2916,7 @@ static bool pmu_queue_is_empty(struct pmu_gk20a *pmu, | |||
2916 | return head == tail; | 2916 | return head == tail; |
2917 | } | 2917 | } |
2918 | 2918 | ||
2919 | static bool pmu_queue_has_room(struct pmu_gk20a *pmu, | 2919 | static bool pmu_queue_has_room(struct nvgpu_pmu *pmu, |
2920 | struct pmu_queue *queue, u32 size, bool *need_rewind) | 2920 | struct pmu_queue *queue, u32 size, bool *need_rewind) |
2921 | { | 2921 | { |
2922 | u32 head, tail; | 2922 | u32 head, tail; |
@@ -2946,7 +2946,7 @@ static bool pmu_queue_has_room(struct pmu_gk20a *pmu, | |||
2946 | return size <= free; | 2946 | return size <= free; |
2947 | } | 2947 | } |
2948 | 2948 | ||
2949 | static int pmu_queue_push(struct pmu_gk20a *pmu, | 2949 | static int pmu_queue_push(struct nvgpu_pmu *pmu, |
2950 | struct pmu_queue *queue, void *data, u32 size) | 2950 | struct pmu_queue *queue, void *data, u32 size) |
2951 | { | 2951 | { |
2952 | 2952 | ||
@@ -2962,7 +2962,7 @@ static int pmu_queue_push(struct pmu_gk20a *pmu, | |||
2962 | return 0; | 2962 | return 0; |
2963 | } | 2963 | } |
2964 | 2964 | ||
2965 | static int pmu_queue_pop(struct pmu_gk20a *pmu, | 2965 | static int pmu_queue_pop(struct nvgpu_pmu *pmu, |
2966 | struct pmu_queue *queue, void *data, u32 size, | 2966 | struct pmu_queue *queue, void *data, u32 size, |
2967 | u32 *bytes_read) | 2967 | u32 *bytes_read) |
2968 | { | 2968 | { |
@@ -2998,7 +2998,7 @@ static int pmu_queue_pop(struct pmu_gk20a *pmu, | |||
2998 | return 0; | 2998 | return 0; |
2999 | } | 2999 | } |
3000 | 3000 | ||
3001 | static void pmu_queue_rewind(struct pmu_gk20a *pmu, | 3001 | static void pmu_queue_rewind(struct nvgpu_pmu *pmu, |
3002 | struct pmu_queue *queue) | 3002 | struct pmu_queue *queue) |
3003 | { | 3003 | { |
3004 | struct pmu_cmd cmd; | 3004 | struct pmu_cmd cmd; |
@@ -3022,7 +3022,7 @@ static void pmu_queue_rewind(struct pmu_gk20a *pmu, | |||
3022 | } | 3022 | } |
3023 | 3023 | ||
3024 | /* open for read and lock the queue */ | 3024 | /* open for read and lock the queue */ |
3025 | static int pmu_queue_open_read(struct pmu_gk20a *pmu, | 3025 | static int pmu_queue_open_read(struct nvgpu_pmu *pmu, |
3026 | struct pmu_queue *queue) | 3026 | struct pmu_queue *queue) |
3027 | { | 3027 | { |
3028 | int err; | 3028 | int err; |
@@ -3043,7 +3043,7 @@ static int pmu_queue_open_read(struct pmu_gk20a *pmu, | |||
3043 | 3043 | ||
3044 | /* open for write and lock the queue | 3044 | /* open for write and lock the queue |
3045 | make sure there's enough free space for the write */ | 3045 | make sure there's enough free space for the write */ |
3046 | static int pmu_queue_open_write(struct pmu_gk20a *pmu, | 3046 | static int pmu_queue_open_write(struct nvgpu_pmu *pmu, |
3047 | struct pmu_queue *queue, u32 size) | 3047 | struct pmu_queue *queue, u32 size) |
3048 | { | 3048 | { |
3049 | bool rewind = false; | 3049 | bool rewind = false; |
@@ -3074,7 +3074,7 @@ static int pmu_queue_open_write(struct pmu_gk20a *pmu, | |||
3074 | } | 3074 | } |
3075 | 3075 | ||
3076 | /* close and unlock the queue */ | 3076 | /* close and unlock the queue */ |
3077 | static int pmu_queue_close(struct pmu_gk20a *pmu, | 3077 | static int pmu_queue_close(struct nvgpu_pmu *pmu, |
3078 | struct pmu_queue *queue, bool commit) | 3078 | struct pmu_queue *queue, bool commit) |
3079 | { | 3079 | { |
3080 | if (!queue->opened) | 3080 | if (!queue->opened) |
@@ -3098,7 +3098,7 @@ static int pmu_queue_close(struct pmu_gk20a *pmu, | |||
3098 | return 0; | 3098 | return 0; |
3099 | } | 3099 | } |
3100 | 3100 | ||
3101 | void gk20a_remove_pmu_support(struct pmu_gk20a *pmu) | 3101 | void gk20a_remove_pmu_support(struct nvgpu_pmu *pmu) |
3102 | { | 3102 | { |
3103 | struct gk20a *g = gk20a_from_pmu(pmu); | 3103 | struct gk20a *g = gk20a_from_pmu(pmu); |
3104 | 3104 | ||
@@ -3118,7 +3118,7 @@ void gk20a_remove_pmu_support(struct pmu_gk20a *pmu) | |||
3118 | 3118 | ||
3119 | static int gk20a_init_pmu_reset_enable_hw(struct gk20a *g) | 3119 | static int gk20a_init_pmu_reset_enable_hw(struct gk20a *g) |
3120 | { | 3120 | { |
3121 | struct pmu_gk20a *pmu = &g->pmu; | 3121 | struct nvgpu_pmu *pmu = &g->pmu; |
3122 | 3122 | ||
3123 | gk20a_dbg_fn(""); | 3123 | gk20a_dbg_fn(""); |
3124 | 3124 | ||
@@ -3129,7 +3129,7 @@ static int gk20a_init_pmu_reset_enable_hw(struct gk20a *g) | |||
3129 | 3129 | ||
3130 | static int gk20a_prepare_ucode(struct gk20a *g) | 3130 | static int gk20a_prepare_ucode(struct gk20a *g) |
3131 | { | 3131 | { |
3132 | struct pmu_gk20a *pmu = &g->pmu; | 3132 | struct nvgpu_pmu *pmu = &g->pmu; |
3133 | int err = 0; | 3133 | int err = 0; |
3134 | struct mm_gk20a *mm = &g->mm; | 3134 | struct mm_gk20a *mm = &g->mm; |
3135 | struct vm_gk20a *vm = &mm->pmu.vm; | 3135 | struct vm_gk20a *vm = &mm->pmu.vm; |
@@ -3168,7 +3168,7 @@ static int gk20a_prepare_ucode(struct gk20a *g) | |||
3168 | 3168 | ||
3169 | static int gk20a_init_pmu_setup_sw(struct gk20a *g) | 3169 | static int gk20a_init_pmu_setup_sw(struct gk20a *g) |
3170 | { | 3170 | { |
3171 | struct pmu_gk20a *pmu = &g->pmu; | 3171 | struct nvgpu_pmu *pmu = &g->pmu; |
3172 | struct mm_gk20a *mm = &g->mm; | 3172 | struct mm_gk20a *mm = &g->mm; |
3173 | struct vm_gk20a *vm = &mm->pmu.vm; | 3173 | struct vm_gk20a *vm = &mm->pmu.vm; |
3174 | unsigned int i; | 3174 | unsigned int i; |
@@ -3266,7 +3266,7 @@ skip_init: | |||
3266 | static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg, | 3266 | static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg, |
3267 | void *param, u32 handle, u32 status) | 3267 | void *param, u32 handle, u32 status) |
3268 | { | 3268 | { |
3269 | struct pmu_gk20a *pmu = param; | 3269 | struct nvgpu_pmu *pmu = param; |
3270 | struct pmu_pg_msg_eng_buf_stat *eng_buf_stat = &msg->msg.pg.eng_buf_stat; | 3270 | struct pmu_pg_msg_eng_buf_stat *eng_buf_stat = &msg->msg.pg.eng_buf_stat; |
3271 | 3271 | ||
3272 | gk20a_dbg_fn(""); | 3272 | gk20a_dbg_fn(""); |
@@ -3289,7 +3289,7 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg, | |||
3289 | 3289 | ||
3290 | static int gk20a_init_pmu_setup_hw1(struct gk20a *g) | 3290 | static int gk20a_init_pmu_setup_hw1(struct gk20a *g) |
3291 | { | 3291 | { |
3292 | struct pmu_gk20a *pmu = &g->pmu; | 3292 | struct nvgpu_pmu *pmu = &g->pmu; |
3293 | int err = 0; | 3293 | int err = 0; |
3294 | 3294 | ||
3295 | gk20a_dbg_fn(""); | 3295 | gk20a_dbg_fn(""); |
@@ -3327,7 +3327,7 @@ static void pmu_setup_hw_enable_elpg(struct gk20a *g); | |||
3327 | static void nvgpu_pmu_state_change(struct gk20a *g, u32 pmu_state, | 3327 | static void nvgpu_pmu_state_change(struct gk20a *g, u32 pmu_state, |
3328 | bool post_change_event) | 3328 | bool post_change_event) |
3329 | { | 3329 | { |
3330 | struct pmu_gk20a *pmu = &g->pmu; | 3330 | struct nvgpu_pmu *pmu = &g->pmu; |
3331 | 3331 | ||
3332 | pmu->pmu_state = pmu_state; | 3332 | pmu->pmu_state = pmu_state; |
3333 | 3333 | ||
@@ -3343,7 +3343,7 @@ static void nvgpu_pmu_state_change(struct gk20a *g, u32 pmu_state, | |||
3343 | static int nvgpu_pg_init_task(void *arg) | 3343 | static int nvgpu_pg_init_task(void *arg) |
3344 | { | 3344 | { |
3345 | struct gk20a *g = (struct gk20a *)arg; | 3345 | struct gk20a *g = (struct gk20a *)arg; |
3346 | struct pmu_gk20a *pmu = &g->pmu; | 3346 | struct nvgpu_pmu *pmu = &g->pmu; |
3347 | struct nvgpu_pg_init *pg_init = &pmu->pg_init; | 3347 | struct nvgpu_pg_init *pg_init = &pmu->pg_init; |
3348 | u32 pmu_state = 0; | 3348 | u32 pmu_state = 0; |
3349 | 3349 | ||
@@ -3396,7 +3396,7 @@ static int nvgpu_pg_init_task(void *arg) | |||
3396 | 3396 | ||
3397 | static int nvgpu_init_task_pg_init(struct gk20a *g) | 3397 | static int nvgpu_init_task_pg_init(struct gk20a *g) |
3398 | { | 3398 | { |
3399 | struct pmu_gk20a *pmu = &g->pmu; | 3399 | struct nvgpu_pmu *pmu = &g->pmu; |
3400 | char thread_name[64]; | 3400 | char thread_name[64]; |
3401 | int err = 0; | 3401 | int err = 0; |
3402 | 3402 | ||
@@ -3415,7 +3415,7 @@ static int nvgpu_init_task_pg_init(struct gk20a *g) | |||
3415 | 3415 | ||
3416 | int gk20a_init_pmu_bind_fecs(struct gk20a *g) | 3416 | int gk20a_init_pmu_bind_fecs(struct gk20a *g) |
3417 | { | 3417 | { |
3418 | struct pmu_gk20a *pmu = &g->pmu; | 3418 | struct nvgpu_pmu *pmu = &g->pmu; |
3419 | struct pmu_cmd cmd; | 3419 | struct pmu_cmd cmd; |
3420 | u32 desc; | 3420 | u32 desc; |
3421 | int err = 0; | 3421 | int err = 0; |
@@ -3454,7 +3454,7 @@ int gk20a_init_pmu_bind_fecs(struct gk20a *g) | |||
3454 | 3454 | ||
3455 | static void pmu_setup_hw_load_zbc(struct gk20a *g) | 3455 | static void pmu_setup_hw_load_zbc(struct gk20a *g) |
3456 | { | 3456 | { |
3457 | struct pmu_gk20a *pmu = &g->pmu; | 3457 | struct nvgpu_pmu *pmu = &g->pmu; |
3458 | struct pmu_cmd cmd; | 3458 | struct pmu_cmd cmd; |
3459 | u32 desc; | 3459 | u32 desc; |
3460 | u32 gr_engine_id; | 3460 | u32 gr_engine_id; |
@@ -3489,7 +3489,7 @@ static void pmu_setup_hw_load_zbc(struct gk20a *g) | |||
3489 | 3489 | ||
3490 | static void pmu_setup_hw_enable_elpg(struct gk20a *g) | 3490 | static void pmu_setup_hw_enable_elpg(struct gk20a *g) |
3491 | { | 3491 | { |
3492 | struct pmu_gk20a *pmu = &g->pmu; | 3492 | struct nvgpu_pmu *pmu = &g->pmu; |
3493 | 3493 | ||
3494 | /* | 3494 | /* |
3495 | * FIXME: To enable ELPG, we increase the PMU ext2priv timeout unit to | 3495 | * FIXME: To enable ELPG, we increase the PMU ext2priv timeout unit to |
@@ -3532,7 +3532,7 @@ static void gk20a_write_dmatrfbase(struct gk20a *g, u32 addr) | |||
3532 | int gk20a_pmu_reset(struct gk20a *g) | 3532 | int gk20a_pmu_reset(struct gk20a *g) |
3533 | { | 3533 | { |
3534 | int err; | 3534 | int err; |
3535 | struct pmu_gk20a *pmu = &g->pmu; | 3535 | struct nvgpu_pmu *pmu = &g->pmu; |
3536 | 3536 | ||
3537 | err = pmu_reset(pmu); | 3537 | err = pmu_reset(pmu); |
3538 | 3538 | ||
@@ -3592,7 +3592,7 @@ void gk20a_init_pmu_ops(struct gpu_ops *gops) | |||
3592 | 3592 | ||
3593 | int gk20a_init_pmu_support(struct gk20a *g) | 3593 | int gk20a_init_pmu_support(struct gk20a *g) |
3594 | { | 3594 | { |
3595 | struct pmu_gk20a *pmu = &g->pmu; | 3595 | struct nvgpu_pmu *pmu = &g->pmu; |
3596 | u32 err; | 3596 | u32 err; |
3597 | 3597 | ||
3598 | gk20a_dbg_fn(""); | 3598 | gk20a_dbg_fn(""); |
@@ -3621,7 +3621,7 @@ int gk20a_init_pmu_support(struct gk20a *g) | |||
3621 | static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg, | 3621 | static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg, |
3622 | void *param, u32 handle, u32 status) | 3622 | void *param, u32 handle, u32 status) |
3623 | { | 3623 | { |
3624 | struct pmu_gk20a *pmu = param; | 3624 | struct nvgpu_pmu *pmu = param; |
3625 | struct pmu_pg_msg_elpg_msg *elpg_msg = &msg->msg.pg.elpg_msg; | 3625 | struct pmu_pg_msg_elpg_msg *elpg_msg = &msg->msg.pg.elpg_msg; |
3626 | 3626 | ||
3627 | gk20a_dbg_fn(""); | 3627 | gk20a_dbg_fn(""); |
@@ -3681,7 +3681,7 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg, | |||
3681 | static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg, | 3681 | static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg, |
3682 | void *param, u32 handle, u32 status) | 3682 | void *param, u32 handle, u32 status) |
3683 | { | 3683 | { |
3684 | struct pmu_gk20a *pmu = param; | 3684 | struct nvgpu_pmu *pmu = param; |
3685 | 3685 | ||
3686 | gk20a_dbg_fn(""); | 3686 | gk20a_dbg_fn(""); |
3687 | 3687 | ||
@@ -3704,7 +3704,7 @@ static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg, | |||
3704 | 3704 | ||
3705 | static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id) | 3705 | static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id) |
3706 | { | 3706 | { |
3707 | struct pmu_gk20a *pmu = &g->pmu; | 3707 | struct nvgpu_pmu *pmu = &g->pmu; |
3708 | struct pmu_cmd cmd; | 3708 | struct pmu_cmd cmd; |
3709 | u32 seq; | 3709 | u32 seq; |
3710 | 3710 | ||
@@ -3766,7 +3766,7 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id) | |||
3766 | } | 3766 | } |
3767 | static int pmu_init_powergating(struct gk20a *g) | 3767 | static int pmu_init_powergating(struct gk20a *g) |
3768 | { | 3768 | { |
3769 | struct pmu_gk20a *pmu = &g->pmu; | 3769 | struct nvgpu_pmu *pmu = &g->pmu; |
3770 | u32 pg_engine_id; | 3770 | u32 pg_engine_id; |
3771 | u32 pg_engine_id_list = 0; | 3771 | u32 pg_engine_id_list = 0; |
3772 | 3772 | ||
@@ -3795,7 +3795,7 @@ static int pmu_init_powergating(struct gk20a *g) | |||
3795 | return 0; | 3795 | return 0; |
3796 | } | 3796 | } |
3797 | 3797 | ||
3798 | static u8 get_perfmon_id(struct pmu_gk20a *pmu) | 3798 | static u8 get_perfmon_id(struct nvgpu_pmu *pmu) |
3799 | { | 3799 | { |
3800 | struct gk20a *g = gk20a_from_pmu(pmu); | 3800 | struct gk20a *g = gk20a_from_pmu(pmu); |
3801 | u32 ver = g->gpu_characteristics.arch + g->gpu_characteristics.impl; | 3801 | u32 ver = g->gpu_characteristics.arch + g->gpu_characteristics.impl; |
@@ -3824,7 +3824,7 @@ static u8 get_perfmon_id(struct pmu_gk20a *pmu) | |||
3824 | return unit_id; | 3824 | return unit_id; |
3825 | } | 3825 | } |
3826 | 3826 | ||
3827 | static int pmu_init_perfmon(struct pmu_gk20a *pmu) | 3827 | static int pmu_init_perfmon(struct nvgpu_pmu *pmu) |
3828 | { | 3828 | { |
3829 | struct gk20a *g = gk20a_from_pmu(pmu); | 3829 | struct gk20a *g = gk20a_from_pmu(pmu); |
3830 | struct pmu_v *pv = &g->ops.pmu_ver; | 3830 | struct pmu_v *pv = &g->ops.pmu_ver; |
@@ -3924,7 +3924,7 @@ static int pmu_init_perfmon(struct pmu_gk20a *pmu) | |||
3924 | return 0; | 3924 | return 0; |
3925 | } | 3925 | } |
3926 | 3926 | ||
3927 | static int pmu_process_init_msg(struct pmu_gk20a *pmu, | 3927 | static int pmu_process_init_msg(struct nvgpu_pmu *pmu, |
3928 | struct pmu_msg *msg) | 3928 | struct pmu_msg *msg) |
3929 | { | 3929 | { |
3930 | struct gk20a *g = gk20a_from_pmu(pmu); | 3930 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -4002,7 +4002,7 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu, | |||
4002 | return 0; | 4002 | return 0; |
4003 | } | 4003 | } |
4004 | 4004 | ||
4005 | static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue, | 4005 | static bool pmu_read_message(struct nvgpu_pmu *pmu, struct pmu_queue *queue, |
4006 | struct pmu_msg *msg, int *status) | 4006 | struct pmu_msg *msg, int *status) |
4007 | { | 4007 | { |
4008 | struct gk20a *g = gk20a_from_pmu(pmu); | 4008 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -4077,7 +4077,7 @@ clean_up: | |||
4077 | return false; | 4077 | return false; |
4078 | } | 4078 | } |
4079 | 4079 | ||
4080 | static int pmu_response_handle(struct pmu_gk20a *pmu, | 4080 | static int pmu_response_handle(struct nvgpu_pmu *pmu, |
4081 | struct pmu_msg *msg) | 4081 | struct pmu_msg *msg) |
4082 | { | 4082 | { |
4083 | struct gk20a *g = gk20a_from_pmu(pmu); | 4083 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -4168,14 +4168,14 @@ static int pmu_response_handle(struct pmu_gk20a *pmu, | |||
4168 | static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg, | 4168 | static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg, |
4169 | void *param, u32 handle, u32 status) | 4169 | void *param, u32 handle, u32 status) |
4170 | { | 4170 | { |
4171 | struct pmu_gk20a *pmu = param; | 4171 | struct nvgpu_pmu *pmu = param; |
4172 | gk20a_dbg_pmu("reply ZBC_TABLE_UPDATE"); | 4172 | gk20a_dbg_pmu("reply ZBC_TABLE_UPDATE"); |
4173 | pmu->zbc_save_done = 1; | 4173 | pmu->zbc_save_done = 1; |
4174 | } | 4174 | } |
4175 | 4175 | ||
4176 | void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries) | 4176 | void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries) |
4177 | { | 4177 | { |
4178 | struct pmu_gk20a *pmu = &g->pmu; | 4178 | struct nvgpu_pmu *pmu = &g->pmu; |
4179 | struct pmu_cmd cmd; | 4179 | struct pmu_cmd cmd; |
4180 | u32 seq; | 4180 | u32 seq; |
4181 | 4181 | ||
@@ -4199,7 +4199,7 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries) | |||
4199 | nvgpu_err(g, "ZBC save timeout"); | 4199 | nvgpu_err(g, "ZBC save timeout"); |
4200 | } | 4200 | } |
4201 | 4201 | ||
4202 | int nvgpu_pmu_perfmon_start_sampling(struct pmu_gk20a *pmu) | 4202 | int nvgpu_pmu_perfmon_start_sampling(struct nvgpu_pmu *pmu) |
4203 | { | 4203 | { |
4204 | struct gk20a *g = gk20a_from_pmu(pmu); | 4204 | struct gk20a *g = gk20a_from_pmu(pmu); |
4205 | struct pmu_v *pv = &g->ops.pmu_ver; | 4205 | struct pmu_v *pv = &g->ops.pmu_ver; |
@@ -4243,7 +4243,7 @@ int nvgpu_pmu_perfmon_start_sampling(struct pmu_gk20a *pmu) | |||
4243 | return 0; | 4243 | return 0; |
4244 | } | 4244 | } |
4245 | 4245 | ||
4246 | int nvgpu_pmu_perfmon_stop_sampling(struct pmu_gk20a *pmu) | 4246 | int nvgpu_pmu_perfmon_stop_sampling(struct nvgpu_pmu *pmu) |
4247 | { | 4247 | { |
4248 | struct gk20a *g = gk20a_from_pmu(pmu); | 4248 | struct gk20a *g = gk20a_from_pmu(pmu); |
4249 | struct pmu_cmd cmd; | 4249 | struct pmu_cmd cmd; |
@@ -4261,7 +4261,7 @@ int nvgpu_pmu_perfmon_stop_sampling(struct pmu_gk20a *pmu) | |||
4261 | return 0; | 4261 | return 0; |
4262 | } | 4262 | } |
4263 | 4263 | ||
4264 | static int pmu_handle_perfmon_event(struct pmu_gk20a *pmu, | 4264 | static int pmu_handle_perfmon_event(struct nvgpu_pmu *pmu, |
4265 | struct pmu_perfmon_msg *msg) | 4265 | struct pmu_perfmon_msg *msg) |
4266 | { | 4266 | { |
4267 | gk20a_dbg_fn(""); | 4267 | gk20a_dbg_fn(""); |
@@ -4294,7 +4294,7 @@ static int pmu_handle_perfmon_event(struct pmu_gk20a *pmu, | |||
4294 | } | 4294 | } |
4295 | 4295 | ||
4296 | 4296 | ||
4297 | static int pmu_handle_therm_event(struct pmu_gk20a *pmu, | 4297 | static int pmu_handle_therm_event(struct nvgpu_pmu *pmu, |
4298 | struct nv_pmu_therm_msg *msg) | 4298 | struct nv_pmu_therm_msg *msg) |
4299 | { | 4299 | { |
4300 | gk20a_dbg_fn(""); | 4300 | gk20a_dbg_fn(""); |
@@ -4318,7 +4318,7 @@ static int pmu_handle_therm_event(struct pmu_gk20a *pmu, | |||
4318 | return 0; | 4318 | return 0; |
4319 | } | 4319 | } |
4320 | 4320 | ||
4321 | static int pmu_handle_event(struct pmu_gk20a *pmu, struct pmu_msg *msg) | 4321 | static int pmu_handle_event(struct nvgpu_pmu *pmu, struct pmu_msg *msg) |
4322 | { | 4322 | { |
4323 | int err = 0; | 4323 | int err = 0; |
4324 | struct gk20a *g = gk20a_from_pmu(pmu); | 4324 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -4347,7 +4347,7 @@ static int pmu_handle_event(struct pmu_gk20a *pmu, struct pmu_msg *msg) | |||
4347 | return err; | 4347 | return err; |
4348 | } | 4348 | } |
4349 | 4349 | ||
4350 | static int pmu_process_message(struct pmu_gk20a *pmu) | 4350 | static int pmu_process_message(struct nvgpu_pmu *pmu) |
4351 | { | 4351 | { |
4352 | struct pmu_msg msg; | 4352 | struct pmu_msg msg; |
4353 | int status; | 4353 | int status; |
@@ -4383,7 +4383,7 @@ static int pmu_process_message(struct pmu_gk20a *pmu) | |||
4383 | return 0; | 4383 | return 0; |
4384 | } | 4384 | } |
4385 | 4385 | ||
4386 | int pmu_wait_message_cond(struct pmu_gk20a *pmu, u32 timeout_ms, | 4386 | int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms, |
4387 | u32 *var, u32 val) | 4387 | u32 *var, u32 val) |
4388 | { | 4388 | { |
4389 | struct gk20a *g = gk20a_from_pmu(pmu); | 4389 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -4411,7 +4411,7 @@ int pmu_wait_message_cond(struct pmu_gk20a *pmu, u32 timeout_ms, | |||
4411 | return -ETIMEDOUT; | 4411 | return -ETIMEDOUT; |
4412 | } | 4412 | } |
4413 | 4413 | ||
4414 | static void pmu_dump_elpg_stats(struct pmu_gk20a *pmu) | 4414 | static void pmu_dump_elpg_stats(struct nvgpu_pmu *pmu) |
4415 | { | 4415 | { |
4416 | struct gk20a *g = gk20a_from_pmu(pmu); | 4416 | struct gk20a *g = gk20a_from_pmu(pmu); |
4417 | struct pmu_pg_stats stats; | 4417 | struct pmu_pg_stats stats; |
@@ -4484,7 +4484,7 @@ static void pmu_dump_elpg_stats(struct pmu_gk20a *pmu) | |||
4484 | */ | 4484 | */ |
4485 | } | 4485 | } |
4486 | 4486 | ||
4487 | void pmu_dump_falcon_stats(struct pmu_gk20a *pmu) | 4487 | void pmu_dump_falcon_stats(struct nvgpu_pmu *pmu) |
4488 | { | 4488 | { |
4489 | struct gk20a *g = gk20a_from_pmu(pmu); | 4489 | struct gk20a *g = gk20a_from_pmu(pmu); |
4490 | unsigned int i; | 4490 | unsigned int i; |
@@ -4610,7 +4610,7 @@ void pmu_dump_falcon_stats(struct pmu_gk20a *pmu) | |||
4610 | 4610 | ||
4611 | void gk20a_pmu_isr(struct gk20a *g) | 4611 | void gk20a_pmu_isr(struct gk20a *g) |
4612 | { | 4612 | { |
4613 | struct pmu_gk20a *pmu = &g->pmu; | 4613 | struct nvgpu_pmu *pmu = &g->pmu; |
4614 | struct pmu_queue *queue; | 4614 | struct pmu_queue *queue; |
4615 | u32 intr, mask; | 4615 | u32 intr, mask; |
4616 | bool recheck = false; | 4616 | bool recheck = false; |
@@ -4672,7 +4672,7 @@ void gk20a_pmu_isr(struct gk20a *g) | |||
4672 | nvgpu_mutex_release(&pmu->isr_mutex); | 4672 | nvgpu_mutex_release(&pmu->isr_mutex); |
4673 | } | 4673 | } |
4674 | 4674 | ||
4675 | static bool pmu_validate_cmd(struct pmu_gk20a *pmu, struct pmu_cmd *cmd, | 4675 | static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd, |
4676 | struct pmu_msg *msg, struct pmu_payload *payload, | 4676 | struct pmu_msg *msg, struct pmu_payload *payload, |
4677 | u32 queue_id) | 4677 | u32 queue_id) |
4678 | { | 4678 | { |
@@ -4742,7 +4742,7 @@ invalid_cmd: | |||
4742 | return false; | 4742 | return false; |
4743 | } | 4743 | } |
4744 | 4744 | ||
4745 | static int pmu_write_cmd(struct pmu_gk20a *pmu, struct pmu_cmd *cmd, | 4745 | static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd, |
4746 | u32 queue_id, unsigned long timeout_ms) | 4746 | u32 queue_id, unsigned long timeout_ms) |
4747 | { | 4747 | { |
4748 | struct gk20a *g = gk20a_from_pmu(pmu); | 4748 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -4832,7 +4832,7 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd, | |||
4832 | u32 queue_id, pmu_callback callback, void* cb_param, | 4832 | u32 queue_id, pmu_callback callback, void* cb_param, |
4833 | u32 *seq_desc, unsigned long timeout) | 4833 | u32 *seq_desc, unsigned long timeout) |
4834 | { | 4834 | { |
4835 | struct pmu_gk20a *pmu = &g->pmu; | 4835 | struct nvgpu_pmu *pmu = &g->pmu; |
4836 | struct pmu_v *pv = &g->ops.pmu_ver; | 4836 | struct pmu_v *pv = &g->ops.pmu_ver; |
4837 | struct pmu_sequence *seq; | 4837 | struct pmu_sequence *seq; |
4838 | void *in = NULL, *out = NULL; | 4838 | void *in = NULL, *out = NULL; |
@@ -5022,7 +5022,7 @@ int gk20a_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg) | |||
5022 | 5022 | ||
5023 | static int gk20a_pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id) | 5023 | static int gk20a_pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id) |
5024 | { | 5024 | { |
5025 | struct pmu_gk20a *pmu = &g->pmu; | 5025 | struct nvgpu_pmu *pmu = &g->pmu; |
5026 | struct pmu_cmd cmd; | 5026 | struct pmu_cmd cmd; |
5027 | u32 seq, status; | 5027 | u32 seq, status; |
5028 | 5028 | ||
@@ -5057,7 +5057,7 @@ static int gk20a_pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id) | |||
5057 | 5057 | ||
5058 | int gk20a_pmu_enable_elpg(struct gk20a *g) | 5058 | int gk20a_pmu_enable_elpg(struct gk20a *g) |
5059 | { | 5059 | { |
5060 | struct pmu_gk20a *pmu = &g->pmu; | 5060 | struct nvgpu_pmu *pmu = &g->pmu; |
5061 | struct gr_gk20a *gr = &g->gr; | 5061 | struct gr_gk20a *gr = &g->gr; |
5062 | u32 pg_engine_id; | 5062 | u32 pg_engine_id; |
5063 | u32 pg_engine_id_list = 0; | 5063 | u32 pg_engine_id_list = 0; |
@@ -5115,7 +5115,7 @@ exit_unlock: | |||
5115 | 5115 | ||
5116 | int gk20a_pmu_disable_elpg(struct gk20a *g) | 5116 | int gk20a_pmu_disable_elpg(struct gk20a *g) |
5117 | { | 5117 | { |
5118 | struct pmu_gk20a *pmu = &g->pmu; | 5118 | struct nvgpu_pmu *pmu = &g->pmu; |
5119 | struct pmu_cmd cmd; | 5119 | struct pmu_cmd cmd; |
5120 | u32 seq; | 5120 | u32 seq; |
5121 | int ret = 0; | 5121 | int ret = 0; |
@@ -5225,7 +5225,7 @@ exit_unlock: | |||
5225 | 5225 | ||
5226 | int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable) | 5226 | int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable) |
5227 | { | 5227 | { |
5228 | struct pmu_gk20a *pmu = &g->pmu; | 5228 | struct nvgpu_pmu *pmu = &g->pmu; |
5229 | int err; | 5229 | int err; |
5230 | 5230 | ||
5231 | gk20a_dbg_fn(""); | 5231 | gk20a_dbg_fn(""); |
@@ -5240,7 +5240,7 @@ int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable) | |||
5240 | 5240 | ||
5241 | int gk20a_pmu_destroy(struct gk20a *g) | 5241 | int gk20a_pmu_destroy(struct gk20a *g) |
5242 | { | 5242 | { |
5243 | struct pmu_gk20a *pmu = &g->pmu; | 5243 | struct nvgpu_pmu *pmu = &g->pmu; |
5244 | struct pmu_pg_stats_data pg_stat_data = { 0 }; | 5244 | struct pmu_pg_stats_data pg_stat_data = { 0 }; |
5245 | struct nvgpu_timeout timeout; | 5245 | struct nvgpu_timeout timeout; |
5246 | int i; | 5246 | int i; |
@@ -5306,7 +5306,7 @@ int gk20a_pmu_load_norm(struct gk20a *g, u32 *load) | |||
5306 | 5306 | ||
5307 | int gk20a_pmu_load_update(struct gk20a *g) | 5307 | int gk20a_pmu_load_update(struct gk20a *g) |
5308 | { | 5308 | { |
5309 | struct pmu_gk20a *pmu = &g->pmu; | 5309 | struct nvgpu_pmu *pmu = &g->pmu; |
5310 | u16 _load = 0; | 5310 | u16 _load = 0; |
5311 | 5311 | ||
5312 | if (!pmu->perfmon_ready) { | 5312 | if (!pmu->perfmon_ready) { |
@@ -5354,7 +5354,7 @@ void gk20a_pmu_reset_load_counters(struct gk20a *g) | |||
5354 | void gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, | 5354 | void gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, |
5355 | struct pmu_pg_stats_data *pg_stat_data) | 5355 | struct pmu_pg_stats_data *pg_stat_data) |
5356 | { | 5356 | { |
5357 | struct pmu_gk20a *pmu = &g->pmu; | 5357 | struct nvgpu_pmu *pmu = &g->pmu; |
5358 | struct pmu_pg_stats stats; | 5358 | struct pmu_pg_stats stats; |
5359 | 5359 | ||
5360 | pmu_copy_from_dmem(pmu, | 5360 | pmu_copy_from_dmem(pmu, |
@@ -5372,7 +5372,7 @@ int gk20a_pmu_get_pg_stats(struct gk20a *g, | |||
5372 | u32 pg_engine_id, | 5372 | u32 pg_engine_id, |
5373 | struct pmu_pg_stats_data *pg_stat_data) | 5373 | struct pmu_pg_stats_data *pg_stat_data) |
5374 | { | 5374 | { |
5375 | struct pmu_gk20a *pmu = &g->pmu; | 5375 | struct nvgpu_pmu *pmu = &g->pmu; |
5376 | u32 pg_engine_id_list = 0; | 5376 | u32 pg_engine_id_list = 0; |
5377 | 5377 | ||
5378 | if (!pmu->initialized) { | 5378 | if (!pmu->initialized) { |
@@ -5396,7 +5396,7 @@ int gk20a_pmu_get_pg_stats(struct gk20a *g, | |||
5396 | int gk20a_pmu_ap_send_command(struct gk20a *g, | 5396 | int gk20a_pmu_ap_send_command(struct gk20a *g, |
5397 | union pmu_ap_cmd *p_ap_cmd, bool b_block) | 5397 | union pmu_ap_cmd *p_ap_cmd, bool b_block) |
5398 | { | 5398 | { |
5399 | struct pmu_gk20a *pmu = &g->pmu; | 5399 | struct nvgpu_pmu *pmu = &g->pmu; |
5400 | /* FIXME: where is the PG structure defined?? */ | 5400 | /* FIXME: where is the PG structure defined?? */ |
5401 | u32 status = 0; | 5401 | u32 status = 0; |
5402 | struct pmu_cmd cmd; | 5402 | struct pmu_cmd cmd; |