author		Mahantesh Kumbar <mkumbar@nvidia.com>	2018-07-11 05:30:45 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-07-31 04:25:41 -0400
commit		2d454db04fcc0c03e05b4665831e5780240d79b8
tree		c18fd4bc302ea68e57e1e1d95c0f253e800bc043 /drivers/gpu/nvgpu/common/falcon
parent		d32692ae2427693daf85b3c7b4e24cd36471dec6
gpu: nvgpu: falcon queue support
- Renamed "struct pmu_queue" to "struct nvgpu_falcon_queue" & moved it to falcon.h
- Renamed pmu_queue_* functions to flcn_queue_* & moved them to new file falcon_queue.c
- Created ops for queue functions in struct nvgpu_falcon_queue to support
  different queue types like DMEM/FB-Q.
- Created ops in nvgpu_falcon_engine_dependency_ops to add engine-specific
  queue functionality & assigned the correct HAL functions in the hal*.c files.
- Made changes in dependent functions as needed to replace struct pmu_queue
  & to call queue functions through the nvgpu_falcon_queue data structure.
- Replaced input param "struct nvgpu_pmu *pmu" with "struct gk20a *g" for the
  pmu ops pmu_queue_head/pmu_queue_tail & for the functions
  gk20a_pmu_queue_head()/gk20a_pmu_queue_tail().
- Made changes in nvgpu_pmu_queue_init() to use nvgpu_falcon_queue for the
  PMU queue.
- Modified Makefile to include falcon_queue.o
- Modified Makefile.sources to include falcon_queue.c

Change-Id: I956328f6631b7154267fd5a29eaa1826190d99d1
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1776070
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/falcon')
 -rw-r--r--	drivers/gpu/nvgpu/common/falcon/falcon_queue.c	422
 1 file changed, 422 insertions, 0 deletions
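For context, a minimal sketch of the ops-based queue structure this change introduces. The authoritative definition lives in falcon.h, which is outside this diff; the field types, layout, and comments below are inferred from how falcon_queue.c uses them:

	/* illustrative sketch only -- see nvgpu/falcon.h for the real one */
	struct nvgpu_falcon_queue {
		/* queue parameters, filled in by engine-specific setup code */
		u32 id;		/* logical queue id */
		u32 index;	/* physical queue index */
		u32 offset;	/* start of queue in DMEM */
		u32 position;	/* current read/write position */
		u32 size;	/* queue size in bytes */
		u32 oflag;	/* OFLAG_READ or OFLAG_WRITE */
		struct nvgpu_mutex mutex;

		/* queue-type specific ops (DMEM today, FB-Q planned) */
		int (*head)(struct nvgpu_falcon *flcn,
			struct nvgpu_falcon_queue *queue, u32 *head, bool set);
		int (*tail)(struct nvgpu_falcon *flcn,
			struct nvgpu_falcon_queue *queue, u32 *tail, bool set);
		bool (*has_room)(struct nvgpu_falcon *flcn,
			struct nvgpu_falcon_queue *queue, u32 size,
			bool *need_rewind);
		int (*push)(struct nvgpu_falcon *flcn,
			struct nvgpu_falcon_queue *queue, void *data, u32 size);
		int (*pop)(struct nvgpu_falcon *flcn,
			struct nvgpu_falcon_queue *queue, void *data, u32 size,
			u32 *bytes_read);
		int (*rewind)(struct nvgpu_falcon *flcn,
			struct nvgpu_falcon_queue *queue);
	};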
diff --git a/drivers/gpu/nvgpu/common/falcon/falcon_queue.c b/drivers/gpu/nvgpu/common/falcon/falcon_queue.c
new file mode 100644
index 00000000..6834821c
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/falcon/falcon_queue.c
@@ -0,0 +1,422 @@
/*
 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/lock.h>
#include <nvgpu/timers.h>
#include <nvgpu/pmu.h>
#include <nvgpu/falcon.h>

/* DMEM-Q specific ops */
static int flcn_queue_head_dmem(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue, u32 *head, bool set)
{
	int err = -ENOSYS;

	if (flcn->flcn_engine_dep_ops.queue_head != NULL) {
		err = flcn->flcn_engine_dep_ops.queue_head(flcn->g, queue,
			head, set);
	}

	return err;
}

static int flcn_queue_tail_dmem(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue, u32 *tail, bool set)
{
	int err = -ENOSYS;

	if (flcn->flcn_engine_dep_ops.queue_tail != NULL) {
		err = flcn->flcn_engine_dep_ops.queue_tail(flcn->g, queue,
			tail, set);
	}

	return err;
}

static bool flcn_queue_has_room_dmem(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue, u32 size, bool *need_rewind)
{
	u32 q_head = 0;
	u32 q_tail = 0;
	u32 q_free = 0;
	bool q_rewind = false;
	int err = 0;

	size = ALIGN(size, QUEUE_ALIGNMENT);

	err = queue->head(flcn, queue, &q_head, QUEUE_GET);
	if (err != 0) {
		nvgpu_err(flcn->g, "queue head GET failed");
		goto exit;
	}

	err = queue->tail(flcn, queue, &q_tail, QUEUE_GET);
	if (err != 0) {
		nvgpu_err(flcn->g, "queue tail GET failed");
		goto exit;
	}

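	/*
	 * A write never wraps across the end of the queue: if the
	 * contiguous space from head to the end of the queue (less one
	 * command header, reserved for the rewind marker) cannot hold
	 * this write, report that the caller must rewind to queue->offset.
	 */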
	if (q_head >= q_tail) {
		q_free = queue->offset + queue->size - q_head;
		q_free -= (u32)PMU_CMD_HDR_SIZE;

		if (size > q_free) {
			q_rewind = true;
			q_head = queue->offset;
		}
	}

	if (q_head < q_tail) {
		q_free = q_tail - q_head - 1U;
	}

	if (need_rewind != NULL) {
		*need_rewind = q_rewind;
	}

exit:
	return size <= q_free;
}

static int flcn_queue_push_dmem(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue, void *data, u32 size)
{
	int err = 0;

	err = nvgpu_flcn_copy_to_dmem(flcn, queue->position, data, size, 0);
	if (err != 0) {
		nvgpu_err(flcn->g, "flcn-%d, queue-%d", flcn->flcn_id,
			queue->id);
		nvgpu_err(flcn->g, "dmem queue write failed");
		goto exit;
	}

	queue->position += ALIGN(size, QUEUE_ALIGNMENT);

exit:
	return err;
}

static int flcn_queue_pop_dmem(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue, void *data, u32 size,
	u32 *bytes_read)
{
	struct gk20a *g = flcn->g;
	u32 q_tail = queue->position;
	u32 q_head = 0;
	u32 used = 0;
	int err = 0;

	*bytes_read = 0;

	err = queue->head(flcn, queue, &q_head, QUEUE_GET);
	if (err != 0) {
		nvgpu_err(flcn->g, "flcn-%d, queue-%d, head GET failed",
			flcn->flcn_id, queue->id);
		goto exit;
	}

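	/*
	 * Bytes available to read: up to the head when the data is
	 * contiguous, otherwise only up to the end of the queue (the
	 * remainder is picked up after the reader rewinds).
	 */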
	if (q_head == q_tail) {
		goto exit;
	} else if (q_head > q_tail) {
		used = q_head - q_tail;
	} else {
		used = queue->offset + queue->size - q_tail;
	}

	if (size > used) {
		nvgpu_warn(g, "requested read size larger than queue data");
		size = used;
	}

	err = nvgpu_flcn_copy_from_dmem(flcn, q_tail, data, size, 0);
	if (err != 0) {
		nvgpu_err(g, "flcn-%d, queue-%d", flcn->flcn_id,
			queue->id);
		nvgpu_err(flcn->g, "dmem queue read failed");
		goto exit;
	}

	queue->position += ALIGN(size, QUEUE_ALIGNMENT);
	*bytes_read = size;

exit:
	return err;
}

static int flcn_queue_rewind_dmem(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue)
{
	struct gk20a *g = flcn->g;
	struct pmu_cmd cmd;
	int err = 0;

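	/*
	 * For a write queue, tell the falcon about the wrap by pushing a
	 * header-only PMU_UNIT_REWIND command at the current position.
	 */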
	if (queue->oflag == OFLAG_WRITE) {
		cmd.hdr.unit_id = PMU_UNIT_REWIND;
		cmd.hdr.size = (u8)PMU_CMD_HDR_SIZE;
		err = queue->push(flcn, queue, &cmd, cmd.hdr.size);
		if (err != 0) {
			nvgpu_err(g, "flcn-%d queue-%d, rewind request failed",
				flcn->flcn_id, queue->id);
			goto exit;
		} else {
			nvgpu_pmu_dbg(g, "flcn-%d queue-%d, rewound",
				flcn->flcn_id, queue->id);
		}
	}

	/* update queue position */
	queue->position = queue->offset;

	if (queue->oflag == OFLAG_READ) {
		err = queue->tail(flcn, queue, &queue->position,
			QUEUE_SET);
		if (err != 0) {
			nvgpu_err(flcn->g, "flcn-%d queue-%d, position SET failed",
				flcn->flcn_id, queue->id);
			goto exit;
		}
	}

exit:
	return err;
}

/* assign DMEM queue type specific ops */
static void flcn_queue_init_dmem_queue(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue)
{
	queue->head = flcn_queue_head_dmem;
	queue->tail = flcn_queue_tail_dmem;
	queue->has_room = flcn_queue_has_room_dmem;
	queue->push = flcn_queue_push_dmem;
	queue->pop = flcn_queue_pop_dmem;
	queue->rewind = flcn_queue_rewind_dmem;
}

static int flcn_queue_prepare_write(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue, u32 size)
{
	bool q_rewind = false;
	int err = 0;

	/* make sure there's enough free space for the write */
	if (!queue->has_room(flcn, queue, size, &q_rewind)) {
		nvgpu_pmu_dbg(flcn->g, "queue full: queue-id %d: index %d",
			queue->id, queue->index);
		err = -EAGAIN;
		goto exit;
	}

	err = queue->head(flcn, queue, &queue->position, QUEUE_GET);
	if (err != 0) {
		nvgpu_err(flcn->g, "flcn-%d queue-%d, position GET failed",
			flcn->flcn_id, queue->id);
		goto exit;
	}

	if (q_rewind) {
		err = queue->rewind(flcn, queue);
	}

exit:
	return err;
}

/* queue public functions */

/* queue push operation with lock */
int nvgpu_flcn_queue_push(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue, void *data, u32 size)
{
	int err = 0;

	if (queue->oflag != OFLAG_WRITE) {
		nvgpu_err(flcn->g, "flcn-%d, queue-%d not opened for write",
			flcn->flcn_id, queue->id);
		err = -EINVAL;
		goto exit;
	}

	/* acquire mutex */
	nvgpu_mutex_acquire(&queue->mutex);

	err = flcn_queue_prepare_write(flcn, queue, size);
	if (err != 0) {
		nvgpu_err(flcn->g, "flcn-%d queue-%d, prepare write failed",
			flcn->flcn_id, queue->id);
		goto unlock_mutex;
	}

	err = queue->push(flcn, queue, data, size);
	if (err != 0) {
		nvgpu_err(flcn->g, "flcn-%d queue-%d, failed to write",
			flcn->flcn_id, queue->id);
		/* don't advance head past a failed write */
		goto unlock_mutex;
	}

	err = queue->head(flcn, queue, &queue->position, QUEUE_SET);
	if (err != 0) {
		nvgpu_err(flcn->g, "flcn-%d queue-%d, position SET failed",
			flcn->flcn_id, queue->id);
	}

unlock_mutex:
	/* release mutex */
	nvgpu_mutex_release(&queue->mutex);
exit:
	return err;
}

/* queue pop operation with lock */
int nvgpu_flcn_queue_pop(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue, void *data, u32 size,
	u32 *bytes_read)
{
	int err = 0;

	if (queue->oflag != OFLAG_READ) {
		nvgpu_err(flcn->g, "flcn-%d, queue-%d, not opened for read",
			flcn->flcn_id, queue->id);
		err = -EINVAL;
		goto exit;
	}

	/* acquire mutex */
	nvgpu_mutex_acquire(&queue->mutex);

	err = queue->tail(flcn, queue, &queue->position, QUEUE_GET);
	if (err != 0) {
		nvgpu_err(flcn->g, "flcn-%d queue-%d, position GET failed",
			flcn->flcn_id, queue->id);
		goto unlock_mutex;
	}

	err = queue->pop(flcn, queue, data, size, bytes_read);
	if (err != 0) {
		nvgpu_err(flcn->g, "flcn-%d queue-%d, failed to read",
			flcn->flcn_id, queue->id);
		/* don't advance tail past a failed read */
		goto unlock_mutex;
	}

	err = queue->tail(flcn, queue, &queue->position, QUEUE_SET);
	if (err != 0) {
		nvgpu_err(flcn->g, "flcn-%d queue-%d, position SET failed",
			flcn->flcn_id, queue->id);
	}

unlock_mutex:
	/* release mutex */
	nvgpu_mutex_release(&queue->mutex);
exit:
	return err;
}

int nvgpu_flcn_queue_rewind(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue)
{
	int err = 0;

	/* acquire mutex */
	nvgpu_mutex_acquire(&queue->mutex);

	if (queue->rewind != NULL) {
		err = queue->rewind(flcn, queue);
	}

	/* release mutex */
	nvgpu_mutex_release(&queue->mutex);

	return err;
}

/* queue is_empty check with lock */
bool nvgpu_flcn_queue_is_empty(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue)
{
	u32 q_head = 0;
	u32 q_tail = 0;
	int err = 0;

	/* acquire mutex */
	nvgpu_mutex_acquire(&queue->mutex);

	err = queue->head(flcn, queue, &q_head, QUEUE_GET);
	if (err != 0) {
		nvgpu_err(flcn->g, "flcn-%d queue-%d, head GET failed",
			flcn->flcn_id, queue->id);
		goto exit;
	}

	err = queue->tail(flcn, queue, &q_tail, QUEUE_GET);
	if (err != 0) {
		nvgpu_err(flcn->g, "flcn-%d queue-%d, tail GET failed",
			flcn->flcn_id, queue->id);
		goto exit;
	}

exit:
	/* release mutex */
	nvgpu_mutex_release(&queue->mutex);

	return q_head == q_tail;
}

void nvgpu_flcn_queue_free(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue)
{
	nvgpu_log(flcn->g, gpu_dbg_pmu, "flcn id-%d q-id %d: index %d",
		flcn->flcn_id, queue->id, queue->index);

	/* destroy mutex */
	nvgpu_mutex_destroy(&queue->mutex);

	/* clear data */
	memset(queue, 0, sizeof(struct nvgpu_falcon_queue));
}

int nvgpu_flcn_queue_init(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue)
{
	struct gk20a *g = flcn->g;
	int err = 0;

	nvgpu_log(g, gpu_dbg_pmu,
		"flcn id-%d q-id %d: index %d, offset 0x%08x, size 0x%08x",
		flcn->flcn_id, queue->id, queue->index,
		queue->offset, queue->size);

	/* init mutex */
	err = nvgpu_mutex_init(&queue->mutex);
	if (err != 0) {
		goto exit;
	}

	flcn_queue_init_dmem_queue(flcn, queue);

exit:
	if (err != 0) {
		nvgpu_err(flcn->g, "flcn-%d queue-%d, init failed",
			flcn->flcn_id, queue->id);
	}

	return err;
}
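For orientation, a rough usage sketch of the new public API from a hypothetical PMU-side caller. This is not part of the change; the function names and queue parameters below are illustrative placeholders, and it assumes engine-specific setup code has filled in queue->id/index/offset/size/oflag and called nvgpu_flcn_queue_init() once beforehand:

	/* hypothetical caller -- illustrative only */
	static int example_send_cmd(struct nvgpu_falcon *flcn,
		struct nvgpu_falcon_queue *wr_queue, struct pmu_cmd *cmd)
	{
		/* push takes the queue mutex; -EAGAIN means the queue is full */
		return nvgpu_flcn_queue_push(flcn, wr_queue, cmd,
			cmd->hdr.size);
	}

	static int example_read_msg(struct nvgpu_falcon *flcn,
		struct nvgpu_falcon_queue *rd_queue, void *msg, u32 size)
	{
		u32 bytes_read = 0;
		int err = 0;

		if (!nvgpu_flcn_queue_is_empty(flcn, rd_queue)) {
			err = nvgpu_flcn_queue_pop(flcn, rd_queue, msg, size,
				&bytes_read);
		}

		return err;
	}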