diff options
author | Mahantesh Kumbar <mkumbar@nvidia.com> | 2018-07-11 05:30:45 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2018-07-31 04:25:41 -0400 |
commit | 2d454db04fcc0c03e05b4665831e5780240d79b8 (patch) | |
tree | c18fd4bc302ea68e57e1e1d95c0f253e800bc043 /drivers/gpu/nvgpu/common | |
parent | d32692ae2427693daf85b3c7b4e24cd36471dec6 (diff) |
gpu: nvgpu: falcon queue support
-Renamed "struct pmu_queue" to "struct
nvgpu_falcon_queue" & moved to falcon.h
-Renamed pmu_queue_* functions to flcn_queue_* &
moved to new file falcon_queue.c
-Created ops for queue functions in struct
nvgpu_falcon_queue to support different queue
types like DMEM/FB-Q.
-Created ops in nvgpu_falcon_engine_dependency_ops
to add engine specific queue functionality & assigned
correct HAL functions in hal*.c file.
-Made changes in dependent functions as needed to replace
struct pmu_queue & calling queue functions using
nvgpu_falcon_queue data structure.
-Replaced input param "struct nvgpu_pmu *pmu" with
"struct gk20a *g" for pmu ops pmu_queue_head/pmu_queue_tail
& also for functions gk20a_pmu_queue_head()/
gk20a_pmu_queue_tail().
-Made changes in nvgpu_pmu_queue_init() to use nvgpu_falcon_queue
for PMU queue.
-Modified Makefile to include falcon_queue.o
-Modified Makefile.sources to include falcon_queue.c
Change-Id: I956328f6631b7154267fd5a29eaa1826190d99d1
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1776070
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common')
-rw-r--r-- | drivers/gpu/nvgpu/common/falcon/falcon_queue.c | 422 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/common/pmu/pmu.c | 5 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/common/pmu/pmu_fw.c | 12 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/common/pmu/pmu_ipc.c | 353 |
4 files changed, 481 insertions, 311 deletions
diff --git a/drivers/gpu/nvgpu/common/falcon/falcon_queue.c b/drivers/gpu/nvgpu/common/falcon/falcon_queue.c new file mode 100644 index 00000000..6834821c --- /dev/null +++ b/drivers/gpu/nvgpu/common/falcon/falcon_queue.c | |||
@@ -0,0 +1,422 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
20 | * DEALINGS IN THE SOFTWARE. | ||
21 | */ | ||
22 | |||
23 | #include <nvgpu/lock.h> | ||
24 | #include <nvgpu/timers.h> | ||
25 | #include <nvgpu/pmu.h> | ||
26 | #include <nvgpu/falcon.h> | ||
27 | |||
28 | /* DMEM-Q specific ops */ | ||
29 | static int flcn_queue_head_dmem(struct nvgpu_falcon *flcn, | ||
30 | struct nvgpu_falcon_queue *queue, u32 *head, bool set) | ||
31 | { | ||
32 | int err = -ENOSYS; | ||
33 | |||
34 | if (flcn->flcn_engine_dep_ops.queue_head != NULL) { | ||
35 | err = flcn->flcn_engine_dep_ops.queue_head(flcn->g, queue, | ||
36 | head, set); | ||
37 | } | ||
38 | |||
39 | return err; | ||
40 | } | ||
41 | |||
42 | static int flcn_queue_tail_dmem(struct nvgpu_falcon *flcn, | ||
43 | struct nvgpu_falcon_queue *queue, u32 *tail, bool set) | ||
44 | { | ||
45 | int err = -ENOSYS; | ||
46 | |||
47 | if (flcn->flcn_engine_dep_ops.queue_tail != NULL) { | ||
48 | err = flcn->flcn_engine_dep_ops.queue_tail(flcn->g, queue, | ||
49 | tail, set); | ||
50 | } | ||
51 | |||
52 | return err; | ||
53 | } | ||
54 | |||
55 | static bool flcn_queue_has_room_dmem(struct nvgpu_falcon *flcn, | ||
56 | struct nvgpu_falcon_queue *queue, u32 size, bool *need_rewind) | ||
57 | { | ||
58 | u32 q_head = 0; | ||
59 | u32 q_tail = 0; | ||
60 | u32 q_free = 0; | ||
61 | bool q_rewind = false; | ||
62 | int err = 0; | ||
63 | |||
64 | size = ALIGN(size, QUEUE_ALIGNMENT); | ||
65 | |||
66 | err = queue->head(flcn, queue, &q_head, QUEUE_GET); | ||
67 | if (err != 0) { | ||
68 | nvgpu_err(flcn->g, "queue head GET failed"); | ||
69 | goto exit; | ||
70 | } | ||
71 | |||
72 | err = queue->tail(flcn, queue, &q_tail, QUEUE_GET); | ||
73 | if (err != 0) { | ||
74 | nvgpu_err(flcn->g, "queue tail GET failed"); | ||
75 | goto exit; | ||
76 | } | ||
77 | |||
78 | if (q_head >= q_tail) { | ||
79 | q_free = queue->offset + queue->size - q_head; | ||
80 | q_free -= (u32)PMU_CMD_HDR_SIZE; | ||
81 | |||
82 | if (size > q_free) { | ||
83 | q_rewind = true; | ||
84 | q_head = queue->offset; | ||
85 | } | ||
86 | } | ||
87 | |||
88 | if (q_head < q_tail) { | ||
89 | q_free = q_tail - q_head - 1U; | ||
90 | } | ||
91 | |||
92 | if (need_rewind != NULL) { | ||
93 | *need_rewind = q_rewind; | ||
94 | } | ||
95 | |||
96 | exit: | ||
97 | return size <= q_free; | ||
98 | } | ||
99 | |||
100 | static int flcn_queue_push_dmem(struct nvgpu_falcon *flcn, | ||
101 | struct nvgpu_falcon_queue *queue, void *data, u32 size) | ||
102 | { | ||
103 | int err = 0; | ||
104 | |||
105 | err = nvgpu_flcn_copy_to_dmem(flcn, queue->position, data, size, 0); | ||
106 | if (err != 0) { | ||
107 | nvgpu_err(flcn->g, "flcn-%d, queue-%d", flcn->flcn_id, | ||
108 | queue->id); | ||
109 | nvgpu_err(flcn->g, "dmem queue write failed"); | ||
110 | goto exit; | ||
111 | } | ||
112 | |||
113 | queue->position += ALIGN(size, QUEUE_ALIGNMENT); | ||
114 | |||
115 | exit: | ||
116 | return err; | ||
117 | } | ||
118 | |||
/*
 * Pop up to @size bytes from queue DMEM into @data, starting at the
 * cached read position (queue->position). *bytes_read reports how many
 * bytes were actually copied; it is 0 when the queue is empty or on
 * error.
 *
 * NOTE(review): when head has wrapped behind the read position
 * (q_head < q_tail) only the bytes up to the end of the buffer are
 * considered readable here; the wrap itself is presumably handled by a
 * separate rewind - confirm against the queue rewind path.
 */
static int flcn_queue_pop_dmem(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue, void *data, u32 size,
	u32 *bytes_read)
{
	struct gk20a *g = flcn->g;
	u32 q_tail = queue->position;	/* cached read pointer */
	u32 q_head = 0;
	u32 used = 0;
	int err = 0;

	*bytes_read = 0;

	err = queue->head(flcn, queue, &q_head, QUEUE_GET);
	if (err != 0) {
		nvgpu_err(flcn->g, "flcn-%d, queue-%d, head GET failed",
			flcn->flcn_id, queue->id);
		goto exit;
	}

	if (q_head == q_tail) {
		/* queue empty - report success with 0 bytes read */
		goto exit;
	} else if (q_head > q_tail) {
		/* contiguous data between tail and head */
		used = q_head - q_tail;
	} else {
		/* head wrapped: readable data runs to end of buffer */
		used = queue->offset + queue->size - q_tail;
	}

	if (size > used) {
		/* clamp the read to what is actually available */
		nvgpu_warn(g, "queue size smaller than request read");
		size = used;
	}

	err = nvgpu_flcn_copy_from_dmem(flcn, q_tail, data, size, 0);
	if (err != 0) {
		nvgpu_err(g, "flcn-%d, queue-%d", flcn->flcn_id,
			queue->id);
		nvgpu_err(flcn->g, "dmem queue read failed");
		goto exit;
	}

	/* advance the cached read pointer by the aligned size */
	queue->position += ALIGN(size, QUEUE_ALIGNMENT);
	*bytes_read = size;

exit:
	return err;
}
165 | |||
166 | static int flcn_queue_rewind_dmem(struct nvgpu_falcon *flcn, | ||
167 | struct nvgpu_falcon_queue *queue) | ||
168 | { | ||
169 | struct gk20a *g = flcn->g; | ||
170 | struct pmu_cmd cmd; | ||
171 | int err = 0; | ||
172 | |||
173 | if (queue->oflag == OFLAG_WRITE) { | ||
174 | cmd.hdr.unit_id = PMU_UNIT_REWIND; | ||
175 | cmd.hdr.size = (u8)PMU_CMD_HDR_SIZE; | ||
176 | err = queue->push(flcn, queue, &cmd, cmd.hdr.size); | ||
177 | if (err != 0) { | ||
178 | nvgpu_err(g, "flcn-%d queue-%d, rewind request failed", | ||
179 | flcn->flcn_id, queue->id); | ||
180 | goto exit; | ||
181 | } else { | ||
182 | nvgpu_pmu_dbg(g, "flcn-%d queue-%d, rewinded", | ||
183 | flcn->flcn_id, queue->id); | ||
184 | } | ||
185 | } | ||
186 | |||
187 | /* update queue position */ | ||
188 | queue->position = queue->offset; | ||
189 | |||
190 | if (queue->oflag == OFLAG_READ) { | ||
191 | err = queue->tail(flcn, queue, &queue->position, | ||
192 | QUEUE_SET); | ||
193 | if (err != 0){ | ||
194 | nvgpu_err(flcn->g, "flcn-%d queue-%d, position SET failed", | ||
195 | flcn->flcn_id, queue->id); | ||
196 | goto exit; | ||
197 | } | ||
198 | } | ||
199 | |||
200 | exit: | ||
201 | return err; | ||
202 | } | ||
203 | |||
204 | /* assign DMEM queue type specific ops */ | ||
205 | static void flcn_queue_init_dmem_queue(struct nvgpu_falcon *flcn, | ||
206 | struct nvgpu_falcon_queue *queue) | ||
207 | { | ||
208 | queue->head = flcn_queue_head_dmem; | ||
209 | queue->tail = flcn_queue_tail_dmem; | ||
210 | queue->has_room = flcn_queue_has_room_dmem; | ||
211 | queue->push = flcn_queue_push_dmem; | ||
212 | queue->pop = flcn_queue_pop_dmem; | ||
213 | queue->rewind = flcn_queue_rewind_dmem; | ||
214 | } | ||
215 | |||
216 | static int flcn_queue_prepare_write(struct nvgpu_falcon *flcn, | ||
217 | struct nvgpu_falcon_queue *queue, u32 size) | ||
218 | { | ||
219 | bool q_rewind = false; | ||
220 | int err = 0; | ||
221 | |||
222 | /* make sure there's enough free space for the write */ | ||
223 | if (!queue->has_room(flcn, queue, size, &q_rewind)) { | ||
224 | nvgpu_pmu_dbg(flcn->g, "queue full: queue-id %d: index %d", | ||
225 | queue->id, queue->index); | ||
226 | err = -EAGAIN; | ||
227 | goto exit; | ||
228 | } | ||
229 | |||
230 | err = queue->head(flcn, queue, &queue->position, QUEUE_GET); | ||
231 | if (err != 0) { | ||
232 | nvgpu_err(flcn->g, "flcn-%d queue-%d, position GET failed", | ||
233 | flcn->flcn_id, queue->id); | ||
234 | goto exit; | ||
235 | } | ||
236 | |||
237 | if (q_rewind) { | ||
238 | err = queue->rewind(flcn, queue); | ||
239 | } | ||
240 | |||
241 | exit: | ||
242 | return err; | ||
243 | } | ||
244 | |||
245 | /* queue public functions */ | ||
246 | |||
247 | /* queue push operation with lock */ | ||
248 | int nvgpu_flcn_queue_push(struct nvgpu_falcon *flcn, | ||
249 | struct nvgpu_falcon_queue *queue, void *data, u32 size) | ||
250 | { | ||
251 | int err = 0; | ||
252 | |||
253 | if (queue->oflag != OFLAG_WRITE) { | ||
254 | nvgpu_err(flcn->g, "flcn-%d, queue-%d not opened for write", | ||
255 | flcn->flcn_id, queue->id); | ||
256 | err = -EINVAL; | ||
257 | goto exit; | ||
258 | } | ||
259 | |||
260 | /* acquire mutex */ | ||
261 | nvgpu_mutex_acquire(&queue->mutex); | ||
262 | |||
263 | err = flcn_queue_prepare_write(flcn, queue, size); | ||
264 | if (err != 0) { | ||
265 | nvgpu_err(flcn->g, "flcn-%d queue-%d, fail to open", | ||
266 | flcn->flcn_id, queue->id); | ||
267 | goto unlock_mutex; | ||
268 | } | ||
269 | |||
270 | err = queue->push(flcn, queue, data, size); | ||
271 | if (err != 0) { | ||
272 | nvgpu_err(flcn->g, "flcn-%d queue-%d, fail to write", | ||
273 | flcn->flcn_id, queue->id); | ||
274 | } | ||
275 | |||
276 | err = queue->head(flcn, queue, &queue->position, QUEUE_SET); | ||
277 | if (err != 0){ | ||
278 | nvgpu_err(flcn->g, "flcn-%d queue-%d, position SET failed", | ||
279 | flcn->flcn_id, queue->id); | ||
280 | } | ||
281 | |||
282 | unlock_mutex: | ||
283 | /* release mutex */ | ||
284 | nvgpu_mutex_release(&queue->mutex); | ||
285 | exit: | ||
286 | return err; | ||
287 | } | ||
288 | |||
289 | /* queue pop operation with lock */ | ||
290 | int nvgpu_flcn_queue_pop(struct nvgpu_falcon *flcn, | ||
291 | struct nvgpu_falcon_queue *queue, void *data, u32 size, | ||
292 | u32 *bytes_read) | ||
293 | { | ||
294 | int err = 0; | ||
295 | |||
296 | if (queue->oflag != OFLAG_READ) { | ||
297 | nvgpu_err(flcn->g, "flcn-%d, queue-%d, not opened for read", | ||
298 | flcn->flcn_id, queue->id); | ||
299 | err = -EINVAL; | ||
300 | goto exit; | ||
301 | } | ||
302 | |||
303 | /* acquire mutex */ | ||
304 | nvgpu_mutex_acquire(&queue->mutex); | ||
305 | |||
306 | err = queue->tail(flcn, queue, &queue->position, QUEUE_GET); | ||
307 | if (err != 0) { | ||
308 | nvgpu_err(flcn->g, "flcn-%d queue-%d, position GET failed", | ||
309 | flcn->flcn_id, queue->id); | ||
310 | goto unlock_mutex; | ||
311 | } | ||
312 | |||
313 | err = queue->pop(flcn, queue, data, size, bytes_read); | ||
314 | if (err != 0) { | ||
315 | nvgpu_err(flcn->g, "flcn-%d queue-%d, fail to read", | ||
316 | flcn->flcn_id, queue->id); | ||
317 | } | ||
318 | |||
319 | err = queue->tail(flcn, queue, &queue->position, QUEUE_SET); | ||
320 | if (err != 0){ | ||
321 | nvgpu_err(flcn->g, "flcn-%d queue-%d, position SET failed", | ||
322 | flcn->flcn_id, queue->id); | ||
323 | } | ||
324 | |||
325 | unlock_mutex: | ||
326 | /* release mutex */ | ||
327 | nvgpu_mutex_release(&queue->mutex); | ||
328 | exit: | ||
329 | return err; | ||
330 | } | ||
331 | |||
332 | int nvgpu_flcn_queue_rewind(struct nvgpu_falcon *flcn, | ||
333 | struct nvgpu_falcon_queue *queue) | ||
334 | { | ||
335 | int err = 0; | ||
336 | |||
337 | /* acquire mutex */ | ||
338 | nvgpu_mutex_acquire(&queue->mutex); | ||
339 | |||
340 | if (queue->rewind != NULL) { | ||
341 | err = queue->rewind(flcn, queue); | ||
342 | } | ||
343 | |||
344 | /* release mutex */ | ||
345 | nvgpu_mutex_release(&queue->mutex); | ||
346 | |||
347 | return err; | ||
348 | } | ||
349 | |||
350 | /* queue is_empty check with lock */ | ||
351 | bool nvgpu_flcn_queue_is_empty(struct nvgpu_falcon *flcn, | ||
352 | struct nvgpu_falcon_queue *queue) | ||
353 | { | ||
354 | u32 q_head = 0; | ||
355 | u32 q_tail = 0; | ||
356 | int err = 0; | ||
357 | |||
358 | /* acquire mutex */ | ||
359 | nvgpu_mutex_acquire(&queue->mutex); | ||
360 | |||
361 | err = queue->head(flcn, queue, &q_head, QUEUE_GET); | ||
362 | if (err != 0) { | ||
363 | nvgpu_err(flcn->g, "flcn-%d queue-%d, head GET failed", | ||
364 | flcn->flcn_id, queue->id); | ||
365 | goto exit; | ||
366 | } | ||
367 | |||
368 | err = queue->tail(flcn, queue, &q_tail, QUEUE_GET); | ||
369 | if (err != 0) { | ||
370 | nvgpu_err(flcn->g, "flcn-%d queue-%d, tail GET failed", | ||
371 | flcn->flcn_id, queue->id); | ||
372 | goto exit; | ||
373 | } | ||
374 | |||
375 | exit: | ||
376 | /* release mutex */ | ||
377 | nvgpu_mutex_release(&queue->mutex); | ||
378 | |||
379 | return q_head == q_tail; | ||
380 | } | ||
381 | |||
382 | void nvgpu_flcn_queue_free(struct nvgpu_falcon *flcn, | ||
383 | struct nvgpu_falcon_queue *queue) | ||
384 | { | ||
385 | nvgpu_log(flcn->g, gpu_dbg_pmu, "flcn id-%d q-id %d: index %d ", | ||
386 | flcn->flcn_id, queue->id, queue->index); | ||
387 | |||
388 | /* destroy mutex */ | ||
389 | nvgpu_mutex_destroy(&queue->mutex); | ||
390 | |||
391 | /* clear data*/ | ||
392 | memset(queue, 0, sizeof(struct nvgpu_falcon_queue)); | ||
393 | } | ||
394 | |||
395 | int nvgpu_flcn_queue_init(struct nvgpu_falcon *flcn, | ||
396 | struct nvgpu_falcon_queue *queue) | ||
397 | { | ||
398 | struct gk20a *g = flcn->g; | ||
399 | int err = 0; | ||
400 | |||
401 | nvgpu_log(g, gpu_dbg_pmu, | ||
402 | "flcn id-%d q-id %d: index %d, offset 0x%08x, size 0x%08x", | ||
403 | flcn->flcn_id, queue->id, queue->index, | ||
404 | queue->offset, queue->size); | ||
405 | |||
406 | /* init mutex */ | ||
407 | err = nvgpu_mutex_init(&queue->mutex); | ||
408 | if (err != 0) { | ||
409 | goto exit; | ||
410 | } | ||
411 | |||
412 | flcn_queue_init_dmem_queue(flcn, queue); | ||
413 | |||
414 | exit: | ||
415 | if (err != 0) { | ||
416 | nvgpu_err(flcn->g, "flcn-%d queue-%d, init failed", | ||
417 | flcn->flcn_id, queue->id); | ||
418 | } | ||
419 | |||
420 | return err; | ||
421 | } | ||
422 | |||
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c index b9ac50be..327f67d3 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu.c | |||
@@ -517,8 +517,9 @@ int nvgpu_pmu_destroy(struct gk20a *g) | |||
517 | pmu->isr_enabled = false; | 517 | pmu->isr_enabled = false; |
518 | nvgpu_mutex_release(&pmu->isr_mutex); | 518 | nvgpu_mutex_release(&pmu->isr_mutex); |
519 | 519 | ||
520 | for (i = 0; i < PMU_QUEUE_COUNT; i++) | 520 | for (i = 0; i < PMU_QUEUE_COUNT; i++) { |
521 | nvgpu_mutex_destroy(&pmu->queue[i].mutex); | 521 | nvgpu_flcn_queue_free(pmu->flcn, &pmu->queue[i]); |
522 | } | ||
522 | 523 | ||
523 | nvgpu_pmu_state_change(g, PMU_STATE_OFF, false); | 524 | nvgpu_pmu_state_change(g, PMU_STATE_OFF, false); |
524 | pmu->pmu_ready = false; | 525 | pmu->pmu_ready = false; |
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c index 359c19ca..53eae49a 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c | |||
@@ -848,7 +848,8 @@ static void perfmon_cmd_init_set_mov_avg_v1(struct pmu_perfmon_cmd *pc, | |||
848 | init->samples_in_moving_avg = value; | 848 | init->samples_in_moving_avg = value; |
849 | } | 849 | } |
850 | 850 | ||
851 | static void get_pmu_init_msg_pmu_queue_params_v1(struct pmu_queue *queue, | 851 | static void get_pmu_init_msg_pmu_queue_params_v1( |
852 | struct nvgpu_falcon_queue *queue, | ||
852 | u32 id, void *pmu_init_msg) | 853 | u32 id, void *pmu_init_msg) |
853 | { | 854 | { |
854 | struct pmu_init_msg_pmu_v1 *init = | 855 | struct pmu_init_msg_pmu_v1 *init = |
@@ -859,7 +860,8 @@ static void get_pmu_init_msg_pmu_queue_params_v1(struct pmu_queue *queue, | |||
859 | queue->size = init->queue_info[id].size; | 860 | queue->size = init->queue_info[id].size; |
860 | } | 861 | } |
861 | 862 | ||
862 | static void get_pmu_init_msg_pmu_queue_params_v4(struct pmu_queue *queue, | 863 | static void get_pmu_init_msg_pmu_queue_params_v4( |
864 | struct nvgpu_falcon_queue *queue, | ||
863 | u32 id, void *pmu_init_msg) | 865 | u32 id, void *pmu_init_msg) |
864 | { | 866 | { |
865 | struct pmu_init_msg_pmu_v4 *init = pmu_init_msg; | 867 | struct pmu_init_msg_pmu_v4 *init = pmu_init_msg; |
@@ -885,7 +887,8 @@ static void get_pmu_init_msg_pmu_queue_params_v4(struct pmu_queue *queue, | |||
885 | queue->offset = init->queue_offset + current_ptr; | 887 | queue->offset = init->queue_offset + current_ptr; |
886 | } | 888 | } |
887 | 889 | ||
888 | static void get_pmu_init_msg_pmu_queue_params_v5(struct pmu_queue *queue, | 890 | static void get_pmu_init_msg_pmu_queue_params_v5( |
891 | struct nvgpu_falcon_queue *queue, | ||
889 | u32 id, void *pmu_init_msg) | 892 | u32 id, void *pmu_init_msg) |
890 | { | 893 | { |
891 | struct pmu_init_msg_pmu_v5 *init = pmu_init_msg; | 894 | struct pmu_init_msg_pmu_v5 *init = pmu_init_msg; |
@@ -911,7 +914,8 @@ static void get_pmu_init_msg_pmu_queue_params_v5(struct pmu_queue *queue, | |||
911 | queue->offset = init->queue_offset + current_ptr; | 914 | queue->offset = init->queue_offset + current_ptr; |
912 | } | 915 | } |
913 | 916 | ||
914 | static void get_pmu_init_msg_pmu_queue_params_v3(struct pmu_queue *queue, | 917 | static void get_pmu_init_msg_pmu_queue_params_v3( |
918 | struct nvgpu_falcon_queue *queue, | ||
915 | u32 id, void *pmu_init_msg) | 919 | u32 id, void *pmu_init_msg) |
916 | { | 920 | { |
917 | struct pmu_init_msg_pmu_v3 *init = | 921 | struct pmu_init_msg_pmu_v3 *init = |
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c index a00c2a5e..f34e942d 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <nvgpu/timers.h> | 26 | #include <nvgpu/timers.h> |
27 | #include <nvgpu/bug.h> | 27 | #include <nvgpu/bug.h> |
28 | #include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h> | 28 | #include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h> |
29 | #include <nvgpu/falcon.h> | ||
29 | 30 | ||
30 | #include "gk20a/gk20a.h" | 31 | #include "gk20a/gk20a.h" |
31 | 32 | ||
@@ -100,295 +101,56 @@ int nvgpu_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token) | |||
100 | return g->ops.pmu.pmu_mutex_release(pmu, id, token); | 101 | return g->ops.pmu.pmu_mutex_release(pmu, id, token); |
101 | } | 102 | } |
102 | 103 | ||
103 | /* queue */ | 104 | /* PMU falcon queue init */ |
104 | int nvgpu_pmu_queue_init(struct nvgpu_pmu *pmu, | 105 | int nvgpu_pmu_queue_init(struct nvgpu_pmu *pmu, |
105 | u32 id, union pmu_init_msg_pmu *init) | 106 | u32 id, union pmu_init_msg_pmu *init) |
106 | { | 107 | { |
107 | struct gk20a *g = gk20a_from_pmu(pmu); | 108 | struct gk20a *g = gk20a_from_pmu(pmu); |
108 | struct pmu_queue *queue = &pmu->queue[id]; | 109 | struct nvgpu_falcon_queue *queue = NULL; |
109 | int err; | 110 | u32 oflag = 0; |
110 | 111 | int err = 0; | |
111 | err = nvgpu_mutex_init(&queue->mutex); | ||
112 | if (err) | ||
113 | return err; | ||
114 | |||
115 | queue->id = id; | ||
116 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params(queue, id, init); | ||
117 | queue->mutex_id = id; | ||
118 | |||
119 | nvgpu_pmu_dbg(g, "queue %d: index %d, offset 0x%08x, size 0x%08x", | ||
120 | id, queue->index, queue->offset, queue->size); | ||
121 | |||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | static int pmu_queue_head(struct nvgpu_pmu *pmu, struct pmu_queue *queue, | ||
126 | u32 *head, bool set) | ||
127 | { | ||
128 | struct gk20a *g = gk20a_from_pmu(pmu); | ||
129 | |||
130 | return g->ops.pmu.pmu_queue_head(pmu, queue, head, set); | ||
131 | } | ||
132 | |||
133 | static int pmu_queue_tail(struct nvgpu_pmu *pmu, struct pmu_queue *queue, | ||
134 | u32 *tail, bool set) | ||
135 | { | ||
136 | struct gk20a *g = gk20a_from_pmu(pmu); | ||
137 | |||
138 | return g->ops.pmu.pmu_queue_tail(pmu, queue, tail, set); | ||
139 | } | ||
140 | |||
141 | static inline void pmu_queue_read(struct nvgpu_pmu *pmu, | ||
142 | u32 offset, u8 *dst, u32 size) | ||
143 | { | ||
144 | nvgpu_flcn_copy_from_dmem(pmu->flcn, offset, dst, size, 0); | ||
145 | } | ||
146 | |||
147 | static inline void pmu_queue_write(struct nvgpu_pmu *pmu, | ||
148 | u32 offset, u8 *src, u32 size) | ||
149 | { | ||
150 | nvgpu_flcn_copy_to_dmem(pmu->flcn, offset, src, size, 0); | ||
151 | } | ||
152 | |||
153 | |||
154 | static int pmu_queue_lock(struct nvgpu_pmu *pmu, | ||
155 | struct pmu_queue *queue) | ||
156 | { | ||
157 | int err; | ||
158 | |||
159 | if (PMU_IS_MESSAGE_QUEUE(queue->id)) | ||
160 | return 0; | ||
161 | 112 | ||
162 | if (PMU_IS_SW_COMMAND_QUEUE(queue->id)) { | 113 | if (PMU_IS_COMMAND_QUEUE(id)) { |
163 | nvgpu_mutex_acquire(&queue->mutex); | 114 | /* |
164 | return 0; | 115 | * set OFLAG_WRITE for command queue |
116 | * i.e, push from nvgpu & | ||
117 | * pop form falcon ucode | ||
118 | */ | ||
119 | oflag = OFLAG_WRITE; | ||
120 | } else if (PMU_IS_MESSAGE_QUEUE(id)) { | ||
121 | /* | ||
122 | * set OFLAG_READ for message queue | ||
123 | * i.e, push from falcon ucode & | ||
124 | * pop form nvgpu | ||
125 | */ | ||
126 | oflag = OFLAG_READ; | ||
127 | } else { | ||
128 | nvgpu_err(g, "invalid queue-id %d", id); | ||
129 | err = -EINVAL; | ||
130 | goto exit; | ||
165 | } | 131 | } |
166 | 132 | ||
167 | err = nvgpu_pmu_mutex_acquire(pmu, queue->mutex_id, &queue->mutex_lock); | 133 | /* init queue parameters */ |
168 | return err; | 134 | queue = &pmu->queue[id]; |
169 | } | 135 | queue->id = id; |
170 | 136 | queue->oflag = oflag; | |
171 | static int pmu_queue_unlock(struct nvgpu_pmu *pmu, | 137 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params(queue, id, init); |
172 | struct pmu_queue *queue) | ||
173 | { | ||
174 | int err; | ||
175 | |||
176 | if (PMU_IS_MESSAGE_QUEUE(queue->id)) | ||
177 | return 0; | ||
178 | 138 | ||
179 | if (PMU_IS_SW_COMMAND_QUEUE(queue->id)) { | 139 | err = nvgpu_flcn_queue_init(pmu->flcn, queue); |
180 | nvgpu_mutex_release(&queue->mutex); | 140 | if (err != 0) { |
181 | return 0; | 141 | nvgpu_err(g, "queue-%d init failed", queue->id); |
182 | } | 142 | } |
183 | 143 | ||
184 | err = nvgpu_pmu_mutex_release(pmu, queue->mutex_id, &queue->mutex_lock); | 144 | exit: |
185 | return err; | 145 | return err; |
186 | } | 146 | } |
187 | 147 | ||
188 | /* called by pmu_read_message, no lock */ | ||
189 | bool nvgpu_pmu_queue_is_empty(struct nvgpu_pmu *pmu, | ||
190 | struct pmu_queue *queue) | ||
191 | { | ||
192 | u32 head, tail; | ||
193 | |||
194 | pmu_queue_head(pmu, queue, &head, QUEUE_GET); | ||
195 | if (queue->opened && queue->oflag == OFLAG_READ) | ||
196 | tail = queue->position; | ||
197 | else | ||
198 | pmu_queue_tail(pmu, queue, &tail, QUEUE_GET); | ||
199 | |||
200 | return head == tail; | ||
201 | } | ||
202 | |||
203 | static bool pmu_queue_has_room(struct nvgpu_pmu *pmu, | ||
204 | struct pmu_queue *queue, u32 size, bool *need_rewind) | ||
205 | { | ||
206 | u32 head, tail; | ||
207 | bool rewind = false; | ||
208 | unsigned int free; | ||
209 | |||
210 | size = ALIGN(size, QUEUE_ALIGNMENT); | ||
211 | |||
212 | pmu_queue_head(pmu, queue, &head, QUEUE_GET); | ||
213 | pmu_queue_tail(pmu, queue, &tail, QUEUE_GET); | ||
214 | if (head >= tail) { | ||
215 | free = queue->offset + queue->size - head; | ||
216 | free -= PMU_CMD_HDR_SIZE; | ||
217 | |||
218 | if (size > free) { | ||
219 | rewind = true; | ||
220 | head = queue->offset; | ||
221 | } | ||
222 | } | ||
223 | |||
224 | if (head < tail) | ||
225 | free = tail - head - 1; | ||
226 | |||
227 | if (need_rewind) | ||
228 | *need_rewind = rewind; | ||
229 | |||
230 | return size <= free; | ||
231 | } | ||
232 | |||
233 | static int pmu_queue_push(struct nvgpu_pmu *pmu, | ||
234 | struct pmu_queue *queue, void *data, u32 size) | ||
235 | { | ||
236 | struct gk20a *g = pmu->g; | ||
237 | |||
238 | nvgpu_log_fn(g, " "); | ||
239 | |||
240 | if (!queue->opened && queue->oflag == OFLAG_WRITE) { | ||
241 | nvgpu_err(gk20a_from_pmu(pmu), "queue not opened for write"); | ||
242 | return -EINVAL; | ||
243 | } | ||
244 | |||
245 | pmu_queue_write(pmu, queue->position, data, size); | ||
246 | queue->position += ALIGN(size, QUEUE_ALIGNMENT); | ||
247 | return 0; | ||
248 | } | ||
249 | |||
250 | static int pmu_queue_pop(struct nvgpu_pmu *pmu, | ||
251 | struct pmu_queue *queue, void *data, u32 size, | ||
252 | u32 *bytes_read) | ||
253 | { | ||
254 | u32 head, tail, used; | ||
255 | |||
256 | *bytes_read = 0; | ||
257 | |||
258 | if (!queue->opened && queue->oflag == OFLAG_READ) { | ||
259 | nvgpu_err(gk20a_from_pmu(pmu), "queue not opened for read"); | ||
260 | return -EINVAL; | ||
261 | } | ||
262 | |||
263 | pmu_queue_head(pmu, queue, &head, QUEUE_GET); | ||
264 | tail = queue->position; | ||
265 | |||
266 | if (head == tail) | ||
267 | return 0; | ||
268 | |||
269 | if (head > tail) | ||
270 | used = head - tail; | ||
271 | else | ||
272 | used = queue->offset + queue->size - tail; | ||
273 | |||
274 | if (size > used) { | ||
275 | nvgpu_warn(gk20a_from_pmu(pmu), | ||
276 | "queue size smaller than request read"); | ||
277 | size = used; | ||
278 | } | ||
279 | |||
280 | pmu_queue_read(pmu, tail, data, size); | ||
281 | queue->position += ALIGN(size, QUEUE_ALIGNMENT); | ||
282 | *bytes_read = size; | ||
283 | return 0; | ||
284 | } | ||
285 | |||
286 | static void pmu_queue_rewind(struct nvgpu_pmu *pmu, | ||
287 | struct pmu_queue *queue) | ||
288 | { | ||
289 | struct gk20a *g = gk20a_from_pmu(pmu); | ||
290 | struct pmu_cmd cmd; | ||
291 | |||
292 | nvgpu_log_fn(g, " "); | ||
293 | |||
294 | if (!queue->opened) { | ||
295 | nvgpu_err(gk20a_from_pmu(pmu), "queue not opened"); | ||
296 | return; | ||
297 | } | ||
298 | |||
299 | if (queue->oflag == OFLAG_WRITE) { | ||
300 | cmd.hdr.unit_id = PMU_UNIT_REWIND; | ||
301 | cmd.hdr.size = PMU_CMD_HDR_SIZE; | ||
302 | pmu_queue_push(pmu, queue, &cmd, cmd.hdr.size); | ||
303 | nvgpu_pmu_dbg(g, "queue %d rewinded", queue->id); | ||
304 | } | ||
305 | |||
306 | queue->position = queue->offset; | ||
307 | } | ||
308 | |||
309 | /* open for read and lock the queue */ | ||
310 | static int pmu_queue_open_read(struct nvgpu_pmu *pmu, | ||
311 | struct pmu_queue *queue) | ||
312 | { | ||
313 | int err; | ||
314 | |||
315 | err = pmu_queue_lock(pmu, queue); | ||
316 | if (err) | ||
317 | return err; | ||
318 | |||
319 | if (queue->opened) | ||
320 | BUG(); | ||
321 | |||
322 | pmu_queue_tail(pmu, queue, &queue->position, QUEUE_GET); | ||
323 | queue->oflag = OFLAG_READ; | ||
324 | queue->opened = true; | ||
325 | |||
326 | return 0; | ||
327 | } | ||
328 | |||
329 | /* open for write and lock the queue | ||
330 | * make sure there's enough free space for the write | ||
331 | * */ | ||
332 | static int pmu_queue_open_write(struct nvgpu_pmu *pmu, | ||
333 | struct pmu_queue *queue, u32 size) | ||
334 | { | ||
335 | struct gk20a *g = gk20a_from_pmu(pmu); | ||
336 | bool rewind = false; | ||
337 | int err; | ||
338 | |||
339 | err = pmu_queue_lock(pmu, queue); | ||
340 | if (err) | ||
341 | return err; | ||
342 | |||
343 | if (queue->opened) | ||
344 | BUG(); | ||
345 | |||
346 | if (!pmu_queue_has_room(pmu, queue, size, &rewind)) { | ||
347 | nvgpu_pmu_dbg(g, "queue full: queue-id %d: index %d", | ||
348 | queue->id, queue->index); | ||
349 | pmu_queue_unlock(pmu, queue); | ||
350 | return -EAGAIN; | ||
351 | } | ||
352 | |||
353 | pmu_queue_head(pmu, queue, &queue->position, QUEUE_GET); | ||
354 | queue->oflag = OFLAG_WRITE; | ||
355 | queue->opened = true; | ||
356 | |||
357 | if (rewind) | ||
358 | pmu_queue_rewind(pmu, queue); | ||
359 | |||
360 | return 0; | ||
361 | } | ||
362 | |||
363 | /* close and unlock the queue */ | ||
364 | static int pmu_queue_close(struct nvgpu_pmu *pmu, | ||
365 | struct pmu_queue *queue, bool commit) | ||
366 | { | ||
367 | if (!queue->opened) | ||
368 | return 0; | ||
369 | |||
370 | if (commit) { | ||
371 | if (queue->oflag == OFLAG_READ) | ||
372 | pmu_queue_tail(pmu, queue, | ||
373 | &queue->position, QUEUE_SET); | ||
374 | else | ||
375 | pmu_queue_head(pmu, queue, | ||
376 | &queue->position, QUEUE_SET); | ||
377 | } | ||
378 | |||
379 | queue->opened = false; | ||
380 | |||
381 | pmu_queue_unlock(pmu, queue); | ||
382 | |||
383 | return 0; | ||
384 | } | ||
385 | |||
386 | static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd, | 148 | static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd, |
387 | struct pmu_msg *msg, struct pmu_payload *payload, | 149 | struct pmu_msg *msg, struct pmu_payload *payload, |
388 | u32 queue_id) | 150 | u32 queue_id) |
389 | { | 151 | { |
390 | struct gk20a *g = gk20a_from_pmu(pmu); | 152 | struct gk20a *g = gk20a_from_pmu(pmu); |
391 | struct pmu_queue *queue; | 153 | struct nvgpu_falcon_queue *queue; |
392 | u32 in_size, out_size; | 154 | u32 in_size, out_size; |
393 | 155 | ||
394 | if (!PMU_IS_SW_COMMAND_QUEUE(queue_id)) | 156 | if (!PMU_IS_SW_COMMAND_QUEUE(queue_id)) |
@@ -459,7 +221,7 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd, | |||
459 | u32 queue_id, unsigned long timeout_ms) | 221 | u32 queue_id, unsigned long timeout_ms) |
460 | { | 222 | { |
461 | struct gk20a *g = gk20a_from_pmu(pmu); | 223 | struct gk20a *g = gk20a_from_pmu(pmu); |
462 | struct pmu_queue *queue; | 224 | struct nvgpu_falcon_queue *queue; |
463 | struct nvgpu_timeout timeout; | 225 | struct nvgpu_timeout timeout; |
464 | int err; | 226 | int err; |
465 | 227 | ||
@@ -469,7 +231,7 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd, | |||
469 | nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER); | 231 | nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER); |
470 | 232 | ||
471 | do { | 233 | do { |
472 | err = pmu_queue_open_write(pmu, queue, cmd->hdr.size); | 234 | err = nvgpu_flcn_queue_push(pmu->flcn, queue, cmd, cmd->hdr.size); |
473 | if (err == -EAGAIN && !nvgpu_timeout_expired(&timeout)) | 235 | if (err == -EAGAIN && !nvgpu_timeout_expired(&timeout)) |
474 | nvgpu_usleep_range(1000, 2000); | 236 | nvgpu_usleep_range(1000, 2000); |
475 | else | 237 | else |
@@ -477,15 +239,6 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd, | |||
477 | } while (1); | 239 | } while (1); |
478 | 240 | ||
479 | if (err) | 241 | if (err) |
480 | goto clean_up; | ||
481 | |||
482 | pmu_queue_push(pmu, queue, cmd, cmd->hdr.size); | ||
483 | |||
484 | |||
485 | err = pmu_queue_close(pmu, queue, true); | ||
486 | |||
487 | clean_up: | ||
488 | if (err) | ||
489 | nvgpu_err(g, "fail to write cmd to queue %d", queue_id); | 242 | nvgpu_err(g, "fail to write cmd to queue %d", queue_id); |
490 | else | 243 | else |
491 | nvgpu_log_fn(g, "done"); | 244 | nvgpu_log_fn(g, "done"); |
@@ -840,8 +593,9 @@ static int pmu_handle_event(struct nvgpu_pmu *pmu, struct pmu_msg *msg) | |||
840 | return err; | 593 | return err; |
841 | } | 594 | } |
842 | 595 | ||
843 | static bool pmu_read_message(struct nvgpu_pmu *pmu, struct pmu_queue *queue, | 596 | static bool pmu_read_message(struct nvgpu_pmu *pmu, |
844 | struct pmu_msg *msg, int *status) | 597 | struct nvgpu_falcon_queue *queue, |
598 | struct pmu_msg *msg, int *status) | ||
845 | { | 599 | { |
846 | struct gk20a *g = gk20a_from_pmu(pmu); | 600 | struct gk20a *g = gk20a_from_pmu(pmu); |
847 | u32 read_size, bytes_read; | 601 | u32 read_size, bytes_read; |
@@ -849,17 +603,11 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu, struct pmu_queue *queue, | |||
849 | 603 | ||
850 | *status = 0; | 604 | *status = 0; |
851 | 605 | ||
852 | if (nvgpu_pmu_queue_is_empty(pmu, queue)) | 606 | if (nvgpu_flcn_queue_is_empty(pmu->flcn, queue)) { |
853 | return false; | ||
854 | |||
855 | err = pmu_queue_open_read(pmu, queue); | ||
856 | if (err) { | ||
857 | nvgpu_err(g, "fail to open queue %d for read", queue->id); | ||
858 | *status = err; | ||
859 | return false; | 607 | return false; |
860 | } | 608 | } |
861 | 609 | ||
862 | err = pmu_queue_pop(pmu, queue, &msg->hdr, | 610 | err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->hdr, |
863 | PMU_MSG_HDR_SIZE, &bytes_read); | 611 | PMU_MSG_HDR_SIZE, &bytes_read); |
864 | if (err || bytes_read != PMU_MSG_HDR_SIZE) { | 612 | if (err || bytes_read != PMU_MSG_HDR_SIZE) { |
865 | nvgpu_err(g, "fail to read msg from queue %d", queue->id); | 613 | nvgpu_err(g, "fail to read msg from queue %d", queue->id); |
@@ -868,9 +616,14 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu, struct pmu_queue *queue, | |||
868 | } | 616 | } |
869 | 617 | ||
870 | if (msg->hdr.unit_id == PMU_UNIT_REWIND) { | 618 | if (msg->hdr.unit_id == PMU_UNIT_REWIND) { |
871 | pmu_queue_rewind(pmu, queue); | 619 | err = nvgpu_flcn_queue_rewind(pmu->flcn, queue); |
620 | if (err != 0) { | ||
621 | nvgpu_err(g, "fail to rewind queue %d", queue->id); | ||
622 | *status = err | -EINVAL; | ||
623 | goto clean_up; | ||
624 | } | ||
872 | /* read again after rewind */ | 625 | /* read again after rewind */ |
873 | err = pmu_queue_pop(pmu, queue, &msg->hdr, | 626 | err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->hdr, |
874 | PMU_MSG_HDR_SIZE, &bytes_read); | 627 | PMU_MSG_HDR_SIZE, &bytes_read); |
875 | if (err || bytes_read != PMU_MSG_HDR_SIZE) { | 628 | if (err || bytes_read != PMU_MSG_HDR_SIZE) { |
876 | nvgpu_err(g, | 629 | nvgpu_err(g, |
@@ -889,7 +642,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu, struct pmu_queue *queue, | |||
889 | 642 | ||
890 | if (msg->hdr.size > PMU_MSG_HDR_SIZE) { | 643 | if (msg->hdr.size > PMU_MSG_HDR_SIZE) { |
891 | read_size = msg->hdr.size - PMU_MSG_HDR_SIZE; | 644 | read_size = msg->hdr.size - PMU_MSG_HDR_SIZE; |
892 | err = pmu_queue_pop(pmu, queue, &msg->msg, | 645 | err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->msg, |
893 | read_size, &bytes_read); | 646 | read_size, &bytes_read); |
894 | if (err || bytes_read != read_size) { | 647 | if (err || bytes_read != read_size) { |
895 | nvgpu_err(g, | 648 | nvgpu_err(g, |
@@ -899,19 +652,9 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu, struct pmu_queue *queue, | |||
899 | } | 652 | } |
900 | } | 653 | } |
901 | 654 | ||
902 | err = pmu_queue_close(pmu, queue, true); | ||
903 | if (err) { | ||
904 | nvgpu_err(g, "fail to close queue %d", queue->id); | ||
905 | *status = err; | ||
906 | return false; | ||
907 | } | ||
908 | |||
909 | return true; | 655 | return true; |
910 | 656 | ||
911 | clean_up: | 657 | clean_up: |
912 | err = pmu_queue_close(pmu, queue, false); | ||
913 | if (err) | ||
914 | nvgpu_err(g, "fail to close queue %d", queue->id); | ||
915 | return false; | 658 | return false; |
916 | } | 659 | } |
917 | 660 | ||