summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMahantesh Kumbar <mkumbar@nvidia.com>2018-09-23 05:01:17 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2018-09-27 11:33:56 -0400
commit4dafb2e4927dd7e43ee39d40153cc6c34ed29d27 (patch)
treed60c4d1c0a8df1977b0b6be9e3fbcca326b7f019
parent628e2c79017b83032a840fb85e136d3216dec9c4 (diff)
gpu: nvgpu: falcon engine EMEM queue support
-Removed _dmem postfix from some functions which can be common for DMEM & EMEM queue, and made changes as needed. -Defined flcn_queue_push_emem() & flcn_queue_pop_emem() functions to read/write queue data to/from EMEM -Defined flcn_queue_init_emem_queue() function to assign EMEM specific functions to support EMEM queue type. -Defined QUEUE_TYPE_DMEM to support DMEM based queue. -Defined QUEUE_TYPE_EMEM to support EMEM based queue. -Modified nvgpu_flcn_queue_init() to call queue type flcn_queue_init_dmem/emem_queue() function to assign its ops. JIRA NVGPU-1161 Change-Id: I06333fa318b7ca4137c977ad63f5a857e7b36cc8 Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1841084 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
-rw-r--r--drivers/gpu/nvgpu/common/falcon/falcon_queue.c172
-rw-r--r--drivers/gpu/nvgpu/common/pmu/pmu_ipc.c1
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/falcon.h6
3 files changed, 136 insertions, 43 deletions
diff --git a/drivers/gpu/nvgpu/common/falcon/falcon_queue.c b/drivers/gpu/nvgpu/common/falcon/falcon_queue.c
index 6834821c..1d0a7a6c 100644
--- a/drivers/gpu/nvgpu/common/falcon/falcon_queue.c
+++ b/drivers/gpu/nvgpu/common/falcon/falcon_queue.c
@@ -25,8 +25,8 @@
25#include <nvgpu/pmu.h> 25#include <nvgpu/pmu.h>
26#include <nvgpu/falcon.h> 26#include <nvgpu/falcon.h>
27 27
28/* DMEM-Q specific ops */ 28/* common falcon queue ops */
29static int flcn_queue_head_dmem(struct nvgpu_falcon *flcn, 29static int flcn_queue_head(struct nvgpu_falcon *flcn,
30 struct nvgpu_falcon_queue *queue, u32 *head, bool set) 30 struct nvgpu_falcon_queue *queue, u32 *head, bool set)
31{ 31{
32 int err = -ENOSYS; 32 int err = -ENOSYS;
@@ -39,7 +39,7 @@ static int flcn_queue_head_dmem(struct nvgpu_falcon *flcn,
39 return err; 39 return err;
40} 40}
41 41
42static int flcn_queue_tail_dmem(struct nvgpu_falcon *flcn, 42static int flcn_queue_tail(struct nvgpu_falcon *flcn,
43 struct nvgpu_falcon_queue *queue, u32 *tail, bool set) 43 struct nvgpu_falcon_queue *queue, u32 *tail, bool set)
44{ 44{
45 int err = -ENOSYS; 45 int err = -ENOSYS;
@@ -52,7 +52,7 @@ static int flcn_queue_tail_dmem(struct nvgpu_falcon *flcn,
52 return err; 52 return err;
53} 53}
54 54
55static bool flcn_queue_has_room_dmem(struct nvgpu_falcon *flcn, 55static bool flcn_queue_has_room(struct nvgpu_falcon *flcn,
56 struct nvgpu_falcon_queue *queue, u32 size, bool *need_rewind) 56 struct nvgpu_falcon_queue *queue, u32 size, bool *need_rewind)
57{ 57{
58 u32 q_head = 0; 58 u32 q_head = 0;
@@ -97,16 +97,55 @@ exit:
97 return size <= q_free; 97 return size <= q_free;
98} 98}
99 99
100static int flcn_queue_push_dmem(struct nvgpu_falcon *flcn, 100static int flcn_queue_rewind(struct nvgpu_falcon *flcn,
101 struct nvgpu_falcon_queue *queue)
102{
103 struct gk20a *g = flcn->g;
104 struct pmu_cmd cmd;
105 int err = 0;
106
107 if (queue->oflag == OFLAG_WRITE) {
108 cmd.hdr.unit_id = PMU_UNIT_REWIND;
109 cmd.hdr.size = (u8)PMU_CMD_HDR_SIZE;
110 err = queue->push(flcn, queue, &cmd, cmd.hdr.size);
111 if (err != 0) {
112 nvgpu_err(g, "flcn-%d queue-%d, rewind request failed",
113 flcn->flcn_id, queue->id);
114 goto exit;
115 } else {
116 nvgpu_pmu_dbg(g, "flcn-%d queue-%d, rewinded",
117 flcn->flcn_id, queue->id);
118 }
119 }
120
121 /* update queue position */
122 queue->position = queue->offset;
123
124 if (queue->oflag == OFLAG_READ) {
125 err = queue->tail(flcn, queue, &queue->position,
126 QUEUE_SET);
127 if (err != 0){
128 nvgpu_err(flcn->g, "flcn-%d queue-%d, position SET failed",
129 flcn->flcn_id, queue->id);
130 goto exit;
131 }
132 }
133
134exit:
135 return err;
136}
137
138/* EMEM-Q specific ops */
139static int flcn_queue_push_emem(struct nvgpu_falcon *flcn,
101 struct nvgpu_falcon_queue *queue, void *data, u32 size) 140 struct nvgpu_falcon_queue *queue, void *data, u32 size)
102{ 141{
103 int err = 0; 142 int err = 0;
104 143
105 err = nvgpu_flcn_copy_to_dmem(flcn, queue->position, data, size, 0); 144 err = nvgpu_flcn_copy_to_emem(flcn, queue->position, data, size, 0);
106 if (err != 0) { 145 if (err != 0) {
107 nvgpu_err(flcn->g, "flcn-%d, queue-%d", flcn->flcn_id, 146 nvgpu_err(flcn->g, "flcn-%d, queue-%d", flcn->flcn_id,
108 queue->id); 147 queue->id);
109 nvgpu_err(flcn->g, "dmem queue write failed"); 148 nvgpu_err(flcn->g, "emem queue write failed");
110 goto exit; 149 goto exit;
111 } 150 }
112 151
@@ -116,7 +155,7 @@ exit:
116 return err; 155 return err;
117} 156}
118 157
119static int flcn_queue_pop_dmem(struct nvgpu_falcon *flcn, 158static int flcn_queue_pop_emem(struct nvgpu_falcon *flcn,
120 struct nvgpu_falcon_queue *queue, void *data, u32 size, 159 struct nvgpu_falcon_queue *queue, void *data, u32 size,
121 u32 *bytes_read) 160 u32 *bytes_read)
122{ 161{
@@ -148,11 +187,11 @@ static int flcn_queue_pop_dmem(struct nvgpu_falcon *flcn,
148 size = used; 187 size = used;
149 } 188 }
150 189
151 err = nvgpu_flcn_copy_from_dmem(flcn, q_tail, data, size, 0); 190 err = nvgpu_flcn_copy_from_emem(flcn, q_tail, data, size, 0);
152 if (err != 0) { 191 if (err != 0) {
153 nvgpu_err(g, "flcn-%d, queue-%d", flcn->flcn_id, 192 nvgpu_err(g, "flcn-%d, queue-%d", flcn->flcn_id,
154 queue->id); 193 queue->id);
155 nvgpu_err(flcn->g, "dmem queue read failed"); 194 nvgpu_err(flcn->g, "emem queue read failed");
156 goto exit; 195 goto exit;
157 } 196 }
158 197
@@ -163,40 +202,81 @@ exit:
163 return err; 202 return err;
164} 203}
165 204
166static int flcn_queue_rewind_dmem(struct nvgpu_falcon *flcn, 205/* assign EMEM queue type specific ops */
167 struct nvgpu_falcon_queue *queue) 206static void flcn_queue_init_emem_queue(struct nvgpu_falcon *flcn,
207 struct nvgpu_falcon_queue *queue)
208{
209 queue->head = flcn_queue_head;
210 queue->tail = flcn_queue_tail;
211 queue->has_room = flcn_queue_has_room;
212 queue->rewind = flcn_queue_rewind;
213 queue->push = flcn_queue_push_emem;
214 queue->pop = flcn_queue_pop_emem;
215}
216
217/* DMEM-Q specific ops */
218static int flcn_queue_push_dmem(struct nvgpu_falcon *flcn,
219 struct nvgpu_falcon_queue *queue, void *data, u32 size)
220{
221 int err = 0;
222
223 err = nvgpu_flcn_copy_to_dmem(flcn, queue->position, data, size, 0);
224 if (err != 0) {
225 nvgpu_err(flcn->g, "flcn-%d, queue-%d", flcn->flcn_id,
226 queue->id);
227 nvgpu_err(flcn->g, "dmem queue write failed");
228 goto exit;
229 }
230
231 queue->position += ALIGN(size, QUEUE_ALIGNMENT);
232
233exit:
234 return err;
235}
236
237static int flcn_queue_pop_dmem(struct nvgpu_falcon *flcn,
238 struct nvgpu_falcon_queue *queue, void *data, u32 size,
239 u32 *bytes_read)
168{ 240{
169 struct gk20a *g = flcn->g; 241 struct gk20a *g = flcn->g;
170 struct pmu_cmd cmd; 242 u32 q_tail = queue->position;
243 u32 q_head = 0;
244 u32 used = 0;
171 int err = 0; 245 int err = 0;
172 246
173 if (queue->oflag == OFLAG_WRITE) { 247 *bytes_read = 0;
174 cmd.hdr.unit_id = PMU_UNIT_REWIND; 248
175 cmd.hdr.size = (u8)PMU_CMD_HDR_SIZE; 249 err = queue->head(flcn, queue, &q_head, QUEUE_GET);
176 err = queue->push(flcn, queue, &cmd, cmd.hdr.size); 250 if (err != 0) {
177 if (err != 0) { 251 nvgpu_err(flcn->g, "flcn-%d, queue-%d, head GET failed",
178 nvgpu_err(g, "flcn-%d queue-%d, rewind request failed",
179 flcn->flcn_id, queue->id);
180 goto exit;
181 } else {
182 nvgpu_pmu_dbg(g, "flcn-%d queue-%d, rewinded",
183 flcn->flcn_id, queue->id); 252 flcn->flcn_id, queue->id);
184 } 253 goto exit;
185 } 254 }
186 255
187 /* update queue position */ 256 if (q_head == q_tail) {
188 queue->position = queue->offset; 257 goto exit;
258 } else if (q_head > q_tail) {
259 used = q_head - q_tail;
260 } else {
261 used = queue->offset + queue->size - q_tail;
262 }
189 263
190 if (queue->oflag == OFLAG_READ) { 264 if (size > used) {
191 err = queue->tail(flcn, queue, &queue->position, 265 nvgpu_warn(g, "queue size smaller than request read");
192 QUEUE_SET); 266 size = used;
193 if (err != 0){ 267 }
194 nvgpu_err(flcn->g, "flcn-%d queue-%d, position SET failed", 268
195 flcn->flcn_id, queue->id); 269 err = nvgpu_flcn_copy_from_dmem(flcn, q_tail, data, size, 0);
196 goto exit; 270 if (err != 0) {
197 } 271 nvgpu_err(g, "flcn-%d, queue-%d", flcn->flcn_id,
272 queue->id);
273 nvgpu_err(flcn->g, "dmem queue read failed");
274 goto exit;
198 } 275 }
199 276
277 queue->position += ALIGN(size, QUEUE_ALIGNMENT);
278 *bytes_read = size;
279
200exit: 280exit:
201 return err; 281 return err;
202} 282}
@@ -205,12 +285,12 @@ exit:
205static void flcn_queue_init_dmem_queue(struct nvgpu_falcon *flcn, 285static void flcn_queue_init_dmem_queue(struct nvgpu_falcon *flcn,
206 struct nvgpu_falcon_queue *queue) 286 struct nvgpu_falcon_queue *queue)
207{ 287{
208 queue->head = flcn_queue_head_dmem; 288 queue->head = flcn_queue_head;
209 queue->tail = flcn_queue_tail_dmem; 289 queue->tail = flcn_queue_tail;
210 queue->has_room = flcn_queue_has_room_dmem; 290 queue->has_room = flcn_queue_has_room;
211 queue->push = flcn_queue_push_dmem; 291 queue->push = flcn_queue_push_dmem;
212 queue->pop = flcn_queue_pop_dmem; 292 queue->pop = flcn_queue_pop_dmem;
213 queue->rewind = flcn_queue_rewind_dmem; 293 queue->rewind = flcn_queue_rewind;
214} 294}
215 295
216static int flcn_queue_prepare_write(struct nvgpu_falcon *flcn, 296static int flcn_queue_prepare_write(struct nvgpu_falcon *flcn,
@@ -403,13 +483,21 @@ int nvgpu_flcn_queue_init(struct nvgpu_falcon *flcn,
403 flcn->flcn_id, queue->id, queue->index, 483 flcn->flcn_id, queue->id, queue->index,
404 queue->offset, queue->size); 484 queue->offset, queue->size);
405 485
406 /* init mutex */ 486 switch (queue->queue_type) {
407 err = nvgpu_mutex_init(&queue->mutex); 487 case QUEUE_TYPE_DMEM:
408 if (err != 0) { 488 flcn_queue_init_dmem_queue(flcn, queue);
489 break;
490 case QUEUE_TYPE_EMEM:
491 flcn_queue_init_emem_queue(flcn, queue);
492 break;
493 default:
494 err = -EINVAL;
409 goto exit; 495 goto exit;
496 break;
410 } 497 }
411 498
412 flcn_queue_init_dmem_queue(flcn, queue); 499 /* init mutex */
500 err = nvgpu_mutex_init(&queue->mutex);
413 501
414exit: 502exit:
415 if (err != 0) { 503 if (err != 0) {
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
index f116df13..16f9ba57 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
@@ -134,6 +134,7 @@ int nvgpu_pmu_queue_init(struct nvgpu_pmu *pmu,
134 queue = &pmu->queue[id]; 134 queue = &pmu->queue[id];
135 queue->id = id; 135 queue->id = id;
136 queue->oflag = oflag; 136 queue->oflag = oflag;
137 queue->queue_type = QUEUE_TYPE_DMEM;
137 g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params(queue, id, init); 138 g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params(queue, id, init);
138 139
139 err = nvgpu_flcn_queue_init(pmu->flcn, queue); 140 err = nvgpu_flcn_queue_init(pmu->flcn, queue);
diff --git a/drivers/gpu/nvgpu/include/nvgpu/falcon.h b/drivers/gpu/nvgpu/include/nvgpu/falcon.h
index cf15061d..4fc97ee8 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/falcon.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/falcon.h
@@ -84,7 +84,7 @@
84#define FALCON_MAILBOX_0 0x0 84#define FALCON_MAILBOX_0 0x0
85#define FALCON_MAILBOX_1 0x1 85#define FALCON_MAILBOX_1 0x1
86#define FALCON_MAILBOX_COUNT 0x02 86#define FALCON_MAILBOX_COUNT 0x02
87#define FALCON_BLOCK_SIZE 0x100 87#define FALCON_BLOCK_SIZE 0x100U
88 88
89#define GET_IMEM_TAG(IMEM_ADDR) (IMEM_ADDR >> 8) 89#define GET_IMEM_TAG(IMEM_ADDR) (IMEM_ADDR >> 8)
90 90
@@ -168,6 +168,10 @@ struct gk20a;
168struct nvgpu_falcon; 168struct nvgpu_falcon;
169struct nvgpu_falcon_bl_info; 169struct nvgpu_falcon_bl_info;
170 170
171/* Queue Type */
172#define QUEUE_TYPE_DMEM 0x0U
173#define QUEUE_TYPE_EMEM 0x1U
174
171struct nvgpu_falcon_queue { 175struct nvgpu_falcon_queue {
172 176
173 /* Queue Type (queue_type) */ 177 /* Queue Type (queue_type) */