author     Mahantesh Kumbar <mkumbar@nvidia.com>  2018-07-11 05:30:45 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2018-07-31 04:25:41 -0400
commit     2d454db04fcc0c03e05b4665831e5780240d79b8 (patch)
tree       c18fd4bc302ea68e57e1e1d95c0f253e800bc043 /drivers/gpu/nvgpu/common/pmu
parent     d32692ae2427693daf85b3c7b4e24cd36471dec6 (diff)
gpu: nvgpu: falcon queue support
-Renamed "struct pmu_queue" to "struct nvgpu_falcon_queue" & moved to falcon.h
-Renamed pmu_queue_* functions to flcn_queue_* & moved to new file falcon_queue.c
-Created ops for queue functions in struct nvgpu_falcon_queue to support
 different queue types like DMEM/FB-Q.
-Created ops in nvgpu_falcon_engine_dependency_ops to add engine specific
 queue functionality & assigned correct HAL functions in hal*.c file.
-Made changes in dependent functions as needed to replace struct pmu_queue &
 calling queue functions using nvgpu_falcon_queue data structure.
-Replaced input param "struct nvgpu_pmu *pmu" with "struct gk20a *g" for pmu
 ops pmu_queue_head/pmu_queue_tail & also for functions gk20a_pmu_queue_head()/
 gk20a_pmu_queue_tail().
-Made changes in nvgpu_pmu_queue_init() to use nvgpu_falcon_queue for PMU queue.
-Modified Makefile to include falcon_queue.o
-Modified Makefile.sources to include falcon_queue.c

Change-Id: I956328f6631b7154267fd5a29eaa1826190d99d1
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1776070
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
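For orientation, below is a minimal sketch of the reworked queue object this change describes, reconstructed only from the fields and entry points visible on this page (id, index, offset, size, oflag, and the nvgpu_flcn_queue_init/push/pop/rewind/free calls). The real struct nvgpu_falcon_queue lives in falcon.h and is not shown in this commit page, so the layout, member names and ops signatures here are assumptions, not the committed definition.

/*
 * Illustrative sketch only -- approximates struct nvgpu_falcon_queue as used
 * by the callers in this diff; the actual definition in falcon.h may differ
 * in layout, naming and ops signatures.
 */
struct nvgpu_falcon_queue {
	u32 id;		/* logical queue id (command or message queue) */
	u32 index;	/* HW queue index reported by the PMU INIT message */
	u32 offset;	/* queue start offset */
	u32 size;	/* queue size in bytes */
	u32 position;	/* current read/write cursor */
	u32 oflag;	/* OFLAG_WRITE: nvgpu pushes; OFLAG_READ: nvgpu pops */

	struct nvgpu_mutex mutex;	/* assumed: lock now owned by the queue */

	/* per-type ops (e.g. DMEM vs FB-Q), bound by nvgpu_flcn_queue_init() */
	int (*push)(struct nvgpu_falcon *flcn, struct nvgpu_falcon_queue *queue,
		void *data, u32 size);
	int (*pop)(struct nvgpu_falcon *flcn, struct nvgpu_falcon_queue *queue,
		void *data, u32 size, u32 *bytes_read);
	int (*rewind)(struct nvgpu_falcon *flcn, struct nvgpu_falcon_queue *queue);
};

With the ops bound per queue type, pmu_ipc.c can call nvgpu_flcn_queue_push()/pop()/rewind() without knowing whether the queue is DMEM-backed or an FB queue, which is what lets the open/close/lock helpers in the diff below be removed.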
Diffstat (limited to 'drivers/gpu/nvgpu/common/pmu')
-rw-r--r--  drivers/gpu/nvgpu/common/pmu/pmu.c      |   5
-rw-r--r--  drivers/gpu/nvgpu/common/pmu/pmu_fw.c   |  12
-rw-r--r--  drivers/gpu/nvgpu/common/pmu/pmu_ipc.c  | 353
3 files changed, 59 insertions, 311 deletions
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c
index b9ac50be..327f67d3 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu.c
@@ -517,8 +517,9 @@ int nvgpu_pmu_destroy(struct gk20a *g)
 	pmu->isr_enabled = false;
 	nvgpu_mutex_release(&pmu->isr_mutex);
 
-	for (i = 0; i < PMU_QUEUE_COUNT; i++)
-		nvgpu_mutex_destroy(&pmu->queue[i].mutex);
+	for (i = 0; i < PMU_QUEUE_COUNT; i++) {
+		nvgpu_flcn_queue_free(pmu->flcn, &pmu->queue[i]);
+	}
 
 	nvgpu_pmu_state_change(g, PMU_STATE_OFF, false);
 	pmu->pmu_ready = false;
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c
index 359c19ca..53eae49a 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c
@@ -848,7 +848,8 @@ static void perfmon_cmd_init_set_mov_avg_v1(struct pmu_perfmon_cmd *pc,
 	init->samples_in_moving_avg = value;
 }
 
-static void get_pmu_init_msg_pmu_queue_params_v1(struct pmu_queue *queue,
+static void get_pmu_init_msg_pmu_queue_params_v1(
+	struct nvgpu_falcon_queue *queue,
 	u32 id, void *pmu_init_msg)
 {
 	struct pmu_init_msg_pmu_v1 *init =
@@ -859,7 +860,8 @@ static void get_pmu_init_msg_pmu_queue_params_v1(struct pmu_queue *queue,
 	queue->size = init->queue_info[id].size;
 }
 
-static void get_pmu_init_msg_pmu_queue_params_v4(struct pmu_queue *queue,
+static void get_pmu_init_msg_pmu_queue_params_v4(
+	struct nvgpu_falcon_queue *queue,
 	u32 id, void *pmu_init_msg)
 {
 	struct pmu_init_msg_pmu_v4 *init = pmu_init_msg;
@@ -885,7 +887,8 @@ static void get_pmu_init_msg_pmu_queue_params_v4(struct pmu_queue *queue,
 	queue->offset = init->queue_offset + current_ptr;
 }
 
-static void get_pmu_init_msg_pmu_queue_params_v5(struct pmu_queue *queue,
+static void get_pmu_init_msg_pmu_queue_params_v5(
+	struct nvgpu_falcon_queue *queue,
 	u32 id, void *pmu_init_msg)
 {
 	struct pmu_init_msg_pmu_v5 *init = pmu_init_msg;
@@ -911,7 +914,8 @@ static void get_pmu_init_msg_pmu_queue_params_v5(struct pmu_queue *queue,
 	queue->offset = init->queue_offset + current_ptr;
 }
 
-static void get_pmu_init_msg_pmu_queue_params_v3(struct pmu_queue *queue,
+static void get_pmu_init_msg_pmu_queue_params_v3(
+	struct nvgpu_falcon_queue *queue,
 	u32 id, void *pmu_init_msg)
 {
 	struct pmu_init_msg_pmu_v3 *init =
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
index a00c2a5e..f34e942d 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
@@ -26,6 +26,7 @@
 #include <nvgpu/timers.h>
 #include <nvgpu/bug.h>
 #include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
+#include <nvgpu/falcon.h>
 
 #include "gk20a/gk20a.h"
 
@@ -100,295 +101,56 @@ int nvgpu_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token)
 	return g->ops.pmu.pmu_mutex_release(pmu, id, token);
 }
 
-/* queue */
+/* PMU falcon queue init */
 int nvgpu_pmu_queue_init(struct nvgpu_pmu *pmu,
 	u32 id, union pmu_init_msg_pmu *init)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
-	struct pmu_queue *queue = &pmu->queue[id];
-	int err;
-
-	err = nvgpu_mutex_init(&queue->mutex);
-	if (err)
-		return err;
-
-	queue->id = id;
-	g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params(queue, id, init);
-	queue->mutex_id = id;
-
-	nvgpu_pmu_dbg(g, "queue %d: index %d, offset 0x%08x, size 0x%08x",
-		id, queue->index, queue->offset, queue->size);
-
-	return 0;
-}
-
-static int pmu_queue_head(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
-	u32 *head, bool set)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-
-	return g->ops.pmu.pmu_queue_head(pmu, queue, head, set);
-}
-
-static int pmu_queue_tail(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
-	u32 *tail, bool set)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-
-	return g->ops.pmu.pmu_queue_tail(pmu, queue, tail, set);
-}
-
-static inline void pmu_queue_read(struct nvgpu_pmu *pmu,
-	u32 offset, u8 *dst, u32 size)
-{
-	nvgpu_flcn_copy_from_dmem(pmu->flcn, offset, dst, size, 0);
-}
-
-static inline void pmu_queue_write(struct nvgpu_pmu *pmu,
-	u32 offset, u8 *src, u32 size)
-{
-	nvgpu_flcn_copy_to_dmem(pmu->flcn, offset, src, size, 0);
-}
-
-
-static int pmu_queue_lock(struct nvgpu_pmu *pmu,
-	struct pmu_queue *queue)
-{
-	int err;
-
-	if (PMU_IS_MESSAGE_QUEUE(queue->id))
-		return 0;
+	struct nvgpu_falcon_queue *queue = NULL;
+	u32 oflag = 0;
+	int err = 0;
 
-	if (PMU_IS_SW_COMMAND_QUEUE(queue->id)) {
-		nvgpu_mutex_acquire(&queue->mutex);
-		return 0;
+	if (PMU_IS_COMMAND_QUEUE(id)) {
+		/*
+		 * set OFLAG_WRITE for command queue
+		 * i.e, push from nvgpu &
+		 * pop form falcon ucode
+		 */
+		oflag = OFLAG_WRITE;
+	} else if (PMU_IS_MESSAGE_QUEUE(id)) {
+		/*
+		 * set OFLAG_READ for message queue
+		 * i.e, push from falcon ucode &
+		 * pop form nvgpu
+		 */
+		oflag = OFLAG_READ;
+	} else {
+		nvgpu_err(g, "invalid queue-id %d", id);
+		err = -EINVAL;
+		goto exit;
 	}
 
-	err = nvgpu_pmu_mutex_acquire(pmu, queue->mutex_id, &queue->mutex_lock);
-	return err;
-}
-
-static int pmu_queue_unlock(struct nvgpu_pmu *pmu,
-	struct pmu_queue *queue)
-{
-	int err;
-
-	if (PMU_IS_MESSAGE_QUEUE(queue->id))
-		return 0;
+	/* init queue parameters */
+	queue = &pmu->queue[id];
+	queue->id = id;
+	queue->oflag = oflag;
+	g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params(queue, id, init);
 
-	if (PMU_IS_SW_COMMAND_QUEUE(queue->id)) {
-		nvgpu_mutex_release(&queue->mutex);
-		return 0;
+	err = nvgpu_flcn_queue_init(pmu->flcn, queue);
+	if (err != 0) {
+		nvgpu_err(g, "queue-%d init failed", queue->id);
 	}
 
-	err = nvgpu_pmu_mutex_release(pmu, queue->mutex_id, &queue->mutex_lock);
+exit:
 	return err;
 }
 
-/* called by pmu_read_message, no lock */
-bool nvgpu_pmu_queue_is_empty(struct nvgpu_pmu *pmu,
-	struct pmu_queue *queue)
-{
-	u32 head, tail;
-
-	pmu_queue_head(pmu, queue, &head, QUEUE_GET);
-	if (queue->opened && queue->oflag == OFLAG_READ)
-		tail = queue->position;
-	else
-		pmu_queue_tail(pmu, queue, &tail, QUEUE_GET);
-
-	return head == tail;
-}
-
-static bool pmu_queue_has_room(struct nvgpu_pmu *pmu,
-	struct pmu_queue *queue, u32 size, bool *need_rewind)
-{
-	u32 head, tail;
-	bool rewind = false;
-	unsigned int free;
-
-	size = ALIGN(size, QUEUE_ALIGNMENT);
-
-	pmu_queue_head(pmu, queue, &head, QUEUE_GET);
-	pmu_queue_tail(pmu, queue, &tail, QUEUE_GET);
-	if (head >= tail) {
-		free = queue->offset + queue->size - head;
-		free -= PMU_CMD_HDR_SIZE;
-
-		if (size > free) {
-			rewind = true;
-			head = queue->offset;
-		}
-	}
-
-	if (head < tail)
-		free = tail - head - 1;
-
-	if (need_rewind)
-		*need_rewind = rewind;
-
-	return size <= free;
-}
-
-static int pmu_queue_push(struct nvgpu_pmu *pmu,
-	struct pmu_queue *queue, void *data, u32 size)
-{
-	struct gk20a *g = pmu->g;
-
-	nvgpu_log_fn(g, " ");
-
-	if (!queue->opened && queue->oflag == OFLAG_WRITE) {
-		nvgpu_err(gk20a_from_pmu(pmu), "queue not opened for write");
-		return -EINVAL;
-	}
-
-	pmu_queue_write(pmu, queue->position, data, size);
-	queue->position += ALIGN(size, QUEUE_ALIGNMENT);
-	return 0;
-}
-
-static int pmu_queue_pop(struct nvgpu_pmu *pmu,
-	struct pmu_queue *queue, void *data, u32 size,
-	u32 *bytes_read)
-{
-	u32 head, tail, used;
-
-	*bytes_read = 0;
-
-	if (!queue->opened && queue->oflag == OFLAG_READ) {
-		nvgpu_err(gk20a_from_pmu(pmu), "queue not opened for read");
-		return -EINVAL;
-	}
-
-	pmu_queue_head(pmu, queue, &head, QUEUE_GET);
-	tail = queue->position;
-
-	if (head == tail)
-		return 0;
-
-	if (head > tail)
-		used = head - tail;
-	else
-		used = queue->offset + queue->size - tail;
-
-	if (size > used) {
-		nvgpu_warn(gk20a_from_pmu(pmu),
-			"queue size smaller than request read");
-		size = used;
-	}
-
-	pmu_queue_read(pmu, tail, data, size);
-	queue->position += ALIGN(size, QUEUE_ALIGNMENT);
-	*bytes_read = size;
-	return 0;
-}
-
-static void pmu_queue_rewind(struct nvgpu_pmu *pmu,
-	struct pmu_queue *queue)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-	struct pmu_cmd cmd;
-
-	nvgpu_log_fn(g, " ");
-
-	if (!queue->opened) {
-		nvgpu_err(gk20a_from_pmu(pmu), "queue not opened");
-		return;
-	}
-
-	if (queue->oflag == OFLAG_WRITE) {
-		cmd.hdr.unit_id = PMU_UNIT_REWIND;
-		cmd.hdr.size = PMU_CMD_HDR_SIZE;
-		pmu_queue_push(pmu, queue, &cmd, cmd.hdr.size);
-		nvgpu_pmu_dbg(g, "queue %d rewinded", queue->id);
-	}
-
-	queue->position = queue->offset;
-}
-
-/* open for read and lock the queue */
-static int pmu_queue_open_read(struct nvgpu_pmu *pmu,
-	struct pmu_queue *queue)
-{
-	int err;
-
-	err = pmu_queue_lock(pmu, queue);
-	if (err)
-		return err;
-
-	if (queue->opened)
-		BUG();
-
-	pmu_queue_tail(pmu, queue, &queue->position, QUEUE_GET);
-	queue->oflag = OFLAG_READ;
-	queue->opened = true;
-
-	return 0;
-}
-
-/* open for write and lock the queue
- * make sure there's enough free space for the write
- * */
-static int pmu_queue_open_write(struct nvgpu_pmu *pmu,
-	struct pmu_queue *queue, u32 size)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-	bool rewind = false;
-	int err;
-
-	err = pmu_queue_lock(pmu, queue);
-	if (err)
-		return err;
-
-	if (queue->opened)
-		BUG();
-
-	if (!pmu_queue_has_room(pmu, queue, size, &rewind)) {
-		nvgpu_pmu_dbg(g, "queue full: queue-id %d: index %d",
-			queue->id, queue->index);
-		pmu_queue_unlock(pmu, queue);
-		return -EAGAIN;
-	}
-
-	pmu_queue_head(pmu, queue, &queue->position, QUEUE_GET);
-	queue->oflag = OFLAG_WRITE;
-	queue->opened = true;
-
-	if (rewind)
-		pmu_queue_rewind(pmu, queue);
-
-	return 0;
-}
-
-/* close and unlock the queue */
-static int pmu_queue_close(struct nvgpu_pmu *pmu,
-	struct pmu_queue *queue, bool commit)
-{
-	if (!queue->opened)
-		return 0;
-
-	if (commit) {
-		if (queue->oflag == OFLAG_READ)
-			pmu_queue_tail(pmu, queue,
-				&queue->position, QUEUE_SET);
-		else
-			pmu_queue_head(pmu, queue,
-				&queue->position, QUEUE_SET);
-	}
-
-	queue->opened = false;
-
-	pmu_queue_unlock(pmu, queue);
-
-	return 0;
-}
-
 static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 	struct pmu_msg *msg, struct pmu_payload *payload,
 	u32 queue_id)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
-	struct pmu_queue *queue;
+	struct nvgpu_falcon_queue *queue;
 	u32 in_size, out_size;
 
 	if (!PMU_IS_SW_COMMAND_QUEUE(queue_id))
@@ -459,7 +221,7 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 	u32 queue_id, unsigned long timeout_ms)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
-	struct pmu_queue *queue;
+	struct nvgpu_falcon_queue *queue;
 	struct nvgpu_timeout timeout;
 	int err;
 
@@ -469,7 +231,7 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 	nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
 
 	do {
-		err = pmu_queue_open_write(pmu, queue, cmd->hdr.size);
+		err = nvgpu_flcn_queue_push(pmu->flcn, queue, cmd, cmd->hdr.size);
 		if (err == -EAGAIN && !nvgpu_timeout_expired(&timeout))
 			nvgpu_usleep_range(1000, 2000);
 		else
@@ -477,15 +239,6 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 	} while (1);
 
 	if (err)
-		goto clean_up;
-
-	pmu_queue_push(pmu, queue, cmd, cmd->hdr.size);
-
-
-	err = pmu_queue_close(pmu, queue, true);
-
-clean_up:
-	if (err)
 		nvgpu_err(g, "fail to write cmd to queue %d", queue_id);
 	else
 		nvgpu_log_fn(g, "done");
@@ -840,8 +593,9 @@ static int pmu_handle_event(struct nvgpu_pmu *pmu, struct pmu_msg *msg)
 	return err;
 }
 
-static bool pmu_read_message(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
-	struct pmu_msg *msg, int *status)
+static bool pmu_read_message(struct nvgpu_pmu *pmu,
+	struct nvgpu_falcon_queue *queue,
+	struct pmu_msg *msg, int *status)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
 	u32 read_size, bytes_read;
@@ -849,17 +603,11 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
 
 	*status = 0;
 
-	if (nvgpu_pmu_queue_is_empty(pmu, queue))
-		return false;
-
-	err = pmu_queue_open_read(pmu, queue);
-	if (err) {
-		nvgpu_err(g, "fail to open queue %d for read", queue->id);
-		*status = err;
+	if (nvgpu_flcn_queue_is_empty(pmu->flcn, queue)) {
 		return false;
 	}
 
-	err = pmu_queue_pop(pmu, queue, &msg->hdr,
+	err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->hdr,
 		PMU_MSG_HDR_SIZE, &bytes_read);
 	if (err || bytes_read != PMU_MSG_HDR_SIZE) {
 		nvgpu_err(g, "fail to read msg from queue %d", queue->id);
@@ -868,9 +616,14 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
 	}
 
 	if (msg->hdr.unit_id == PMU_UNIT_REWIND) {
-		pmu_queue_rewind(pmu, queue);
+		err = nvgpu_flcn_queue_rewind(pmu->flcn, queue);
+		if (err != 0) {
+			nvgpu_err(g, "fail to rewind queue %d", queue->id);
+			*status = err | -EINVAL;
+			goto clean_up;
+		}
 		/* read again after rewind */
-		err = pmu_queue_pop(pmu, queue, &msg->hdr,
+		err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->hdr,
 			PMU_MSG_HDR_SIZE, &bytes_read);
 		if (err || bytes_read != PMU_MSG_HDR_SIZE) {
 			nvgpu_err(g,
@@ -889,7 +642,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
 
 	if (msg->hdr.size > PMU_MSG_HDR_SIZE) {
 		read_size = msg->hdr.size - PMU_MSG_HDR_SIZE;
-		err = pmu_queue_pop(pmu, queue, &msg->msg,
+		err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->msg,
 			read_size, &bytes_read);
 		if (err || bytes_read != read_size) {
 			nvgpu_err(g,
@@ -899,19 +652,9 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
 		}
 	}
 
-	err = pmu_queue_close(pmu, queue, true);
-	if (err) {
-		nvgpu_err(g, "fail to close queue %d", queue->id);
-		*status = err;
-		return false;
-	}
-
 	return true;
 
 clean_up:
-	err = pmu_queue_close(pmu, queue, false);
-	if (err)
-		nvgpu_err(g, "fail to close queue %d", queue->id);
 	return false;
 }
 
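Taken together, the pmu_ipc.c changes above collapse the command and message paths onto the generic falcon queue API. A condensed, illustrative view of the resulting call flow follows (error handling and surrounding context trimmed; a reading aid derived from the hunks above, not the literal committed code):

/* Command submit (pmu_write_cmd): the generic push is expected to cover the
 * locking, space check, rewind and head update formerly done by
 * pmu_queue_open_write(), pmu_queue_push() and pmu_queue_close(). */
do {
	err = nvgpu_flcn_queue_push(pmu->flcn, queue, cmd, cmd->hdr.size);
	if (err == -EAGAIN && !nvgpu_timeout_expired(&timeout))
		nvgpu_usleep_range(1000, 2000);
	else
		break;
} while (1);

/* Message receive (pmu_read_message): pop the header, rewind on
 * PMU_UNIT_REWIND, then pop the payload -- no explicit open/close. */
if (nvgpu_flcn_queue_is_empty(pmu->flcn, queue))
	return false;

err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->hdr,
		PMU_MSG_HDR_SIZE, &bytes_read);
if (msg->hdr.unit_id == PMU_UNIT_REWIND)
	err = nvgpu_flcn_queue_rewind(pmu->flcn, queue);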