summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
-rw-r--r--drivers/gpu/nvgpu/gk20a/fifo_gk20a.c489
1 files changed, 360 insertions, 129 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 17efe5ca..766ea749 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -40,6 +40,131 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
40 u32 hw_chid, bool add, 40 u32 hw_chid, bool add,
41 bool wait_for_finish); 41 bool wait_for_finish);
42 42
43u32 gk20a_fifo_get_engine_ids(struct gk20a *g,
44 u32 engine_id[], u32 engine_id_sz,
45 u32 engine_enum)
46{
47 struct fifo_gk20a *f = NULL;
48 u32 instance_cnt = 0;
49 u32 engine_id_idx;
50 u32 active_engine_id = 0;
51 struct fifo_engine_info_gk20a *info = NULL;
52
53 if (g && engine_id_sz && (engine_enum < ENGINE_INVAL_GK20A)) {
54 f = &g->fifo;
55 for (engine_id_idx = 0; engine_id_idx < f->num_engines; ++engine_id_idx) {
56 active_engine_id = f->active_engines_list[engine_id_idx];
57 info = &f->engine_info[active_engine_id];
58
59 if (info->engine_enum == engine_enum) {
60 if (instance_cnt < engine_id_sz) {
61 engine_id[instance_cnt] = active_engine_id;
62 ++instance_cnt;
63 } else {
64 gk20a_dbg_info("warning engine_id table sz is small %d",
65 engine_id_sz);
66 }
67 }
68 }
69 }
70 return instance_cnt;
71}
72
73struct fifo_engine_info_gk20a *gk20a_fifo_get_engine_info(struct gk20a *g, u32 engine_id)
74{
75 struct fifo_gk20a *f = NULL;
76 u32 engine_id_idx;
77 struct fifo_engine_info_gk20a *info = NULL;
78
79 if (!g)
80 return info;
81
82 f = &g->fifo;
83
84 if (engine_id < f->max_engines) {
85 for (engine_id_idx = 0; engine_id_idx < f->num_engines; ++engine_id_idx) {
86 if (engine_id == f->active_engines_list[engine_id_idx]) {
87 info = &f->engine_info[engine_id];
88 break;
89 }
90 }
91 }
92
93 if (!info)
94 gk20a_err(g->dev, "engine_id is not in active list/invalid %d", engine_id);
95
96 return info;
97}
98
99bool gk20a_fifo_is_valid_engine_id(struct gk20a *g, u32 engine_id)
100{
101 struct fifo_gk20a *f = NULL;
102 u32 engine_id_idx;
103 bool valid = false;
104
105 if (!g)
106 return valid;
107
108 f = &g->fifo;
109
110 if (engine_id < f->max_engines) {
111 for (engine_id_idx = 0; engine_id_idx < f->num_engines; ++engine_id_idx) {
112 if (engine_id == f->active_engines_list[engine_id_idx]) {
113 valid = true;
114 break;
115 }
116 }
117 }
118
119 if (!valid)
120 gk20a_err(g->dev, "engine_id is not in active list/invalid %d", engine_id);
121
122 return valid;
123}
124
125u32 gk20a_fifo_get_gr_engine_id(struct gk20a *g)
126{
127 u32 gr_engine_cnt = 0;
128 u32 gr_engine_id = FIFO_INVAL_ENGINE_ID;
129
130 /* Consider 1st available GR engine */
131 gr_engine_cnt = gk20a_fifo_get_engine_ids(g, &gr_engine_id,
132 1, ENGINE_GR_GK20A);
133
134 if (!gr_engine_cnt) {
135 gk20a_err(dev_from_gk20a(g), "No GR engine available on this device!\n");
136 }
137
138 return gr_engine_id;
139}
140
141u32 gk20a_fifo_get_all_ce_engine_reset_mask(struct gk20a *g)
142{
143 u32 reset_mask = 0;
144 u32 engine_enum = ENGINE_INVAL_GK20A;
145 struct fifo_gk20a *f = NULL;
146 u32 engine_id_idx;
147 struct fifo_engine_info_gk20a *engine_info;
148 u32 active_engine_id = 0;
149
150 if (!g)
151 return reset_mask;
152
153 f = &g->fifo;
154
155 for (engine_id_idx = 0; engine_id_idx < f->num_engines; ++engine_id_idx) {
156 active_engine_id = f->active_engines_list[engine_id_idx];
157 engine_info = &f->engine_info[active_engine_id];
158 engine_enum = engine_info->engine_enum;
159
160 if ((engine_enum == ENGINE_GRCE_GK20A) ||
161 (engine_enum == ENGINE_ASYNC_CE_GK20A))
162 reset_mask |= engine_info->reset_mask;
163 }
164
165 return reset_mask;
166}
167
43/* 168/*
44 * Link engine IDs to MMU IDs and vice versa. 169 * Link engine IDs to MMU IDs and vice versa.
45 */ 170 */
@@ -47,12 +172,14 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
47static inline u32 gk20a_engine_id_to_mmu_id(struct gk20a *g, u32 engine_id) 172static inline u32 gk20a_engine_id_to_mmu_id(struct gk20a *g, u32 engine_id)
48{ 173{
49 u32 fault_id = ~0; 174 u32 fault_id = ~0;
175 struct fifo_engine_info_gk20a *engine_info;
50 176
51 if (engine_id < ENGINE_INVAL_GK20A) { 177 engine_info = gk20a_fifo_get_engine_info(g, engine_id);
52 struct fifo_engine_info_gk20a *info =
53 &g->fifo.engine_info[engine_id];
54 178
55 fault_id = info->fault_id; 179 if (engine_info) {
180 fault_id = engine_info->fault_id;
181 } else {
182 gk20a_err(g->dev, "engine_id is not in active list/invalid %d", engine_id);
56 } 183 }
57 return fault_id; 184 return fault_id;
58} 185}
@@ -60,18 +187,19 @@ static inline u32 gk20a_engine_id_to_mmu_id(struct gk20a *g, u32 engine_id)
60static inline u32 gk20a_mmu_id_to_engine_id(struct gk20a *g, u32 fault_id) 187static inline u32 gk20a_mmu_id_to_engine_id(struct gk20a *g, u32 fault_id)
61{ 188{
62 u32 engine_id; 189 u32 engine_id;
63 u32 return_engine_id = ~0; 190 u32 active_engine_id;
191 struct fifo_engine_info_gk20a *engine_info;
192 struct fifo_gk20a *f = &g->fifo;
64 193
65 for (engine_id = 0; engine_id < ENGINE_INVAL_GK20A; engine_id++) { 194 for (engine_id = 0; engine_id < f->num_engines; engine_id++) {
66 struct fifo_engine_info_gk20a *info = 195 active_engine_id = f->active_engines_list[engine_id];
67 &g->fifo.engine_info[engine_id]; 196 engine_info = &g->fifo.engine_info[active_engine_id];
68 197
69 if (info->fault_id == fault_id) { 198 if (engine_info->fault_id == fault_id)
70 return_engine_id = engine_id;
71 break; 199 break;
72 } 200 active_engine_id = FIFO_INVAL_ENGINE_ID;
73 } 201 }
74 return return_engine_id; 202 return active_engine_id;
75} 203}
76 204
77int gk20a_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type, 205int gk20a_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type,
@@ -82,10 +210,15 @@ int gk20a_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type,
82 gk20a_dbg_info("engine type %d", engine_type); 210 gk20a_dbg_info("engine type %d", engine_type);
83 if (engine_type == top_device_info_type_enum_graphics_v()) 211 if (engine_type == top_device_info_type_enum_graphics_v())
84 ret = ENGINE_GR_GK20A; 212 ret = ENGINE_GR_GK20A;
85 else if (engine_type == top_device_info_type_enum_copy2_v()) { 213 else if ((engine_type >= top_device_info_type_enum_copy0_v()) &&
86 ret = ENGINE_CE2_GK20A; 214 (engine_type <= top_device_info_type_enum_copy2_v())) {
215 /* Lets consider all the CE engine have separate runlist at this point
216 * We can identify the ENGINE_GRCE_GK20A type CE using runlist_id
217 * comparsion logic with GR runlist_id in init_engine_info() */
218 ret = ENGINE_ASYNC_CE_GK20A;
219 /* inst_id starts from CE0 to CE2 */
87 if (inst_id) 220 if (inst_id)
88 *inst_id = 0x2; 221 *inst_id = (engine_type - top_device_info_type_enum_copy0_v());
89 } 222 }
90 else 223 else
91 gk20a_err(g->dev, "unknown engine %d", engine_type); 224 gk20a_err(g->dev, "unknown engine %d", engine_type);
@@ -108,12 +241,11 @@ static int init_engine_info(struct fifo_gk20a *f)
108 u32 inst_id = 0; 241 u32 inst_id = 0;
109 u32 pri_base = 0; 242 u32 pri_base = 0;
110 u32 fault_id = 0; 243 u32 fault_id = 0;
244 u32 gr_runlist_id = ~0;
111 245
112 gk20a_dbg_fn(""); 246 gk20a_dbg_fn("");
113 247
114 /* all we really care about finding is the graphics entry */ 248 f->num_engines = 0;
115 /* especially early on in sim it probably thinks it has more */
116 f->num_engines = 2;
117 249
118 for (i = 0; i < max_info_entries; i++) { 250 for (i = 0; i < max_info_entries; i++) {
119 u32 table_entry = gk20a_readl(f->g, top_device_info_r(i)); 251 u32 table_entry = gk20a_readl(f->g, top_device_info_r(i));
@@ -168,8 +300,7 @@ static int init_engine_info(struct fifo_gk20a *f)
168 g->ops.fifo.engine_enum_from_type(g, 300 g->ops.fifo.engine_enum_from_type(g,
169 engine_type, &inst_id); 301 engine_type, &inst_id);
170 } else if (entry == top_device_info_entry_data_v()) { 302 } else if (entry == top_device_info_entry_data_v()) {
171 /* gk20a don't support device_info_data 303 /* gk20a doesn't support device_info_data packet parsing */
172 packet parsing */
173 if (g->ops.fifo.device_info_data_parse) 304 if (g->ops.fifo.device_info_data_parse)
174 g->ops.fifo.device_info_data_parse(g, 305 g->ops.fifo.device_info_data_parse(g,
175 table_entry, &inst_id, &pri_base, 306 table_entry, &inst_id, &pri_base,
@@ -179,7 +310,7 @@ static int init_engine_info(struct fifo_gk20a *f)
179 if (!top_device_info_chain_v(table_entry)) { 310 if (!top_device_info_chain_v(table_entry)) {
180 if (engine_enum < ENGINE_INVAL_GK20A) { 311 if (engine_enum < ENGINE_INVAL_GK20A) {
181 struct fifo_engine_info_gk20a *info = 312 struct fifo_engine_info_gk20a *info =
182 &g->fifo.engine_info[engine_enum]; 313 &g->fifo.engine_info[engine_id];
183 314
184 info->intr_mask |= BIT(intr_id); 315 info->intr_mask |= BIT(intr_id);
185 info->reset_mask |= BIT(reset_id); 316 info->reset_mask |= BIT(reset_id);
@@ -188,11 +319,25 @@ static int init_engine_info(struct fifo_gk20a *f)
188 info->inst_id = inst_id; 319 info->inst_id = inst_id;
189 info->pri_base = pri_base; 320 info->pri_base = pri_base;
190 321
191 if (!fault_id && 322 if (engine_enum == ENGINE_GR_GK20A)
192 (engine_enum == ENGINE_CE2_GK20A)) 323 gr_runlist_id = runlist_id;
324
325 /* GR and GR_COPY shares same runlist_id */
326 if ((engine_enum == ENGINE_ASYNC_CE_GK20A) &&
327 (gr_runlist_id == runlist_id))
328 engine_enum = ENGINE_GRCE_GK20A;
329
330 info->engine_enum = engine_enum;
331
332 if (!fault_id && (engine_enum == ENGINE_GRCE_GK20A))
193 fault_id = 0x1b; 333 fault_id = 0x1b;
194 info->fault_id = fault_id; 334 info->fault_id = fault_id;
195 335
336 /* engine_id starts from 0 to NV_HOST_NUM_ENGINES */
337 f->active_engines_list[f->num_engines] = engine_id;
338
339 ++f->num_engines;
340
196 engine_enum = ENGINE_INVAL_GK20A; 341 engine_enum = ENGINE_INVAL_GK20A;
197 } 342 }
198 } 343 }
@@ -204,13 +349,19 @@ static int init_engine_info(struct fifo_gk20a *f)
204u32 gk20a_fifo_engine_interrupt_mask(struct gk20a *g) 349u32 gk20a_fifo_engine_interrupt_mask(struct gk20a *g)
205{ 350{
206 u32 eng_intr_mask = 0; 351 u32 eng_intr_mask = 0;
207 int i = 0; 352 int i;
353 u32 active_engine_id = 0;
354 u32 engine_enum = ENGINE_INVAL_GK20A;
208 355
209 for (i = 0; i < g->fifo.max_engines; i++) { 356 for (i = 0; i < g->fifo.num_engines; i++) {
210 u32 intr_mask = g->fifo.engine_info[i].intr_mask; 357 u32 intr_mask;
211 if (i == ENGINE_CE2_GK20A && 358 active_engine_id = g->fifo.active_engines_list[i];
359 intr_mask = g->fifo.engine_info[active_engine_id].intr_mask;
360 engine_enum = g->fifo.engine_info[active_engine_id].engine_enum;
361 if (((engine_enum == ENGINE_GRCE_GK20A) ||
362 (engine_enum == ENGINE_ASYNC_CE_GK20A)) &&
212 (!g->ops.ce2.isr_stall || !g->ops.ce2.isr_nonstall)) 363 (!g->ops.ce2.isr_stall || !g->ops.ce2.isr_nonstall))
213 continue; 364 continue;
214 365
215 eng_intr_mask |= intr_mask; 366 eng_intr_mask |= intr_mask;
216 } 367 }
@@ -218,13 +369,44 @@ u32 gk20a_fifo_engine_interrupt_mask(struct gk20a *g)
218 return eng_intr_mask; 369 return eng_intr_mask;
219} 370}
220 371
372void gk20a_fifo_delete_runlist(struct fifo_gk20a *f)
373{
374 u32 i;
375 u32 runlist_id;
376 struct fifo_runlist_info_gk20a *runlist;
377 struct gk20a *g = NULL;
378
379 if (!f || !f->runlist_info)
380 return;
381
382 g = f->g;
383
384 for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
385 runlist = &f->runlist_info[runlist_id];
386 for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
387 gk20a_gmmu_free(g, &runlist->mem[i]);
388 }
389
390 kfree(runlist->active_channels);
391 runlist->active_channels = NULL;
392
393 kfree(runlist->active_tsgs);
394 runlist->active_tsgs = NULL;
395
396 mutex_destroy(&runlist->mutex);
397
398 }
399 memset(f->runlist_info, 0, (sizeof(struct fifo_runlist_info_gk20a) *
400 f->max_runlists));
401
402 kfree(f->runlist_info);
403 f->runlist_info = NULL;
404 f->max_runlists = 0;
405}
406
221static void gk20a_remove_fifo_support(struct fifo_gk20a *f) 407static void gk20a_remove_fifo_support(struct fifo_gk20a *f)
222{ 408{
223 struct gk20a *g = f->g; 409 struct gk20a *g = f->g;
224 struct fifo_engine_info_gk20a *engine_info;
225 struct fifo_runlist_info_gk20a *runlist;
226 u32 runlist_id;
227 u32 i;
228 410
229 gk20a_dbg_fn(""); 411 gk20a_dbg_fn("");
230 412
@@ -232,19 +414,14 @@ static void gk20a_remove_fifo_support(struct fifo_gk20a *f)
232 vfree(f->tsg); 414 vfree(f->tsg);
233 gk20a_gmmu_unmap_free(&g->mm.bar1.vm, &f->userd); 415 gk20a_gmmu_unmap_free(&g->mm.bar1.vm, &f->userd);
234 416
235 engine_info = f->engine_info + ENGINE_GR_GK20A; 417 gk20a_fifo_delete_runlist(f);
236 runlist_id = engine_info->runlist_id;
237 runlist = &f->runlist_info[runlist_id];
238
239 for (i = 0; i < MAX_RUNLIST_BUFFERS; i++)
240 gk20a_gmmu_free(g, &runlist->mem[i]);
241 418
242 kfree(runlist->active_channels);
243 kfree(runlist->active_tsgs);
244
245 kfree(f->runlist_info);
246 kfree(f->pbdma_map); 419 kfree(f->pbdma_map);
420 f->pbdma_map = NULL;
247 kfree(f->engine_info); 421 kfree(f->engine_info);
422 f->engine_info = NULL;
423 kfree(f->active_engines_list);
424 f->active_engines_list = NULL;
248} 425}
249 426
 250/* reads info from hardware and fills in pbdma exception info record */ 427
@@ -327,69 +504,58 @@ static void fifo_engine_exception_status(struct gk20a *g,
327 504
328static int init_runlist(struct gk20a *g, struct fifo_gk20a *f) 505static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
329{ 506{
330 struct fifo_engine_info_gk20a *engine_info;
331 struct fifo_runlist_info_gk20a *runlist; 507 struct fifo_runlist_info_gk20a *runlist;
332 struct device *d = dev_from_gk20a(g); 508 struct device *d = dev_from_gk20a(g);
333 u32 runlist_id; 509 s32 runlist_id = -1;
334 u32 i; 510 u32 i;
335 u64 runlist_size; 511 u64 runlist_size;
336 512
337 gk20a_dbg_fn(""); 513 gk20a_dbg_fn("");
338 514
339 f->max_runlists = fifo_eng_runlist_base__size_1_v(); 515 f->max_runlists = g->ops.fifo.eng_runlist_base_size();
340 f->runlist_info = kzalloc(sizeof(struct fifo_runlist_info_gk20a) * 516 f->runlist_info = kzalloc(sizeof(struct fifo_runlist_info_gk20a) *
341 f->max_runlists, GFP_KERNEL); 517 f->max_runlists, GFP_KERNEL);
342 if (!f->runlist_info) 518 if (!f->runlist_info)
343 goto clean_up; 519 goto clean_up_runlist;
344 520
345 engine_info = f->engine_info + ENGINE_GR_GK20A; 521 memset(f->runlist_info, 0, (sizeof(struct fifo_runlist_info_gk20a) *
346 runlist_id = engine_info->runlist_id; 522 f->max_runlists));
347 runlist = &f->runlist_info[runlist_id];
348 523
349 runlist->active_channels = 524 for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
350 kzalloc(DIV_ROUND_UP(f->num_channels, BITS_PER_BYTE), 525 runlist = &f->runlist_info[runlist_id];
351 GFP_KERNEL); 526
352 if (!runlist->active_channels) 527 runlist->active_channels =
353 goto clean_up_runlist_info; 528 kzalloc(DIV_ROUND_UP(f->num_channels, BITS_PER_BYTE),
354 529 GFP_KERNEL);
355 runlist->active_tsgs = 530 if (!runlist->active_channels)
356 kzalloc(DIV_ROUND_UP(f->num_channels, BITS_PER_BYTE),
357 GFP_KERNEL);
358 if (!runlist->active_tsgs)
359 goto clean_up_runlist_info;
360
361 runlist_size = ram_rl_entry_size_v() * f->num_runlist_entries;
362 for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
363 int err = gk20a_gmmu_alloc(g, runlist_size, &runlist->mem[i]);
364 if (err) {
365 dev_err(d, "memory allocation failed\n");
366 goto clean_up_runlist; 531 goto clean_up_runlist;
532
533 runlist->active_tsgs =
534 kzalloc(DIV_ROUND_UP(f->num_channels, BITS_PER_BYTE),
535 GFP_KERNEL);
536 if (!runlist->active_tsgs)
537 goto clean_up_runlist;
538
539 runlist_size = ram_rl_entry_size_v() * f->num_runlist_entries;
540 for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
541 int err = gk20a_gmmu_alloc(g, runlist_size, &runlist->mem[i]);
542 if (err) {
543 dev_err(d, "memory allocation failed\n");
544 goto clean_up_runlist;
545 }
367 } 546 }
368 } 547 mutex_init(&runlist->mutex);
369 mutex_init(&runlist->mutex);
370 548
371 /* None of buffers is pinned if this value doesn't change. 549 /* None of buffers is pinned if this value doesn't change.
372 Otherwise, one of them (cur_buffer) must have been pinned. */ 550 Otherwise, one of them (cur_buffer) must have been pinned. */
373 runlist->cur_buffer = MAX_RUNLIST_BUFFERS; 551 runlist->cur_buffer = MAX_RUNLIST_BUFFERS;
552 }
374 553
375 gk20a_dbg_fn("done"); 554 gk20a_dbg_fn("done");
376 return 0; 555 return 0;
377 556
378clean_up_runlist: 557clean_up_runlist:
379 for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) 558 gk20a_fifo_delete_runlist(f);
380 gk20a_gmmu_free(g, &runlist->mem[i]);
381
382clean_up_runlist_info:
383 kfree(runlist->active_channels);
384 runlist->active_channels = NULL;
385
386 kfree(runlist->active_tsgs);
387 runlist->active_tsgs = NULL;
388
389 kfree(f->runlist_info);
390 f->runlist_info = NULL;
391
392clean_up:
393 gk20a_dbg_fn("fail"); 559 gk20a_dbg_fn("fail");
394 return -ENOMEM; 560 return -ENOMEM;
395} 561}
@@ -543,7 +709,7 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
543 f->num_channels = g->ops.fifo.get_num_fifos(g); 709 f->num_channels = g->ops.fifo.get_num_fifos(g);
544 f->num_runlist_entries = fifo_eng_runlist_length_max_v(); 710 f->num_runlist_entries = fifo_eng_runlist_length_max_v();
545 f->num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA); 711 f->num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
546 f->max_engines = ENGINE_INVAL_GK20A; 712 f->max_engines = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES);
547 713
548 f->userd_entry_size = 1 << ram_userd_base_shift_v(); 714 f->userd_entry_size = 1 << ram_userd_base_shift_v();
549 715
@@ -563,11 +729,15 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
563 GFP_KERNEL); 729 GFP_KERNEL);
564 f->engine_info = kzalloc(f->max_engines * sizeof(*f->engine_info), 730 f->engine_info = kzalloc(f->max_engines * sizeof(*f->engine_info),
565 GFP_KERNEL); 731 GFP_KERNEL);
732 f->active_engines_list = kzalloc(f->max_engines * sizeof(u32),
733 GFP_KERNEL);
566 734
567 if (!(f->channel && f->pbdma_map && f->engine_info)) { 735 if (!(f->channel && f->pbdma_map && f->engine_info &&
736 f->active_engines_list)) {
568 err = -ENOMEM; 737 err = -ENOMEM;
569 goto clean_up; 738 goto clean_up;
570 } 739 }
740 memset(f->active_engines_list, 0xff, (f->max_engines * sizeof(u32)));
571 741
572 /* pbdma map needs to be in place before calling engine info init */ 742 /* pbdma map needs to be in place before calling engine info init */
573 for (i = 0; i < f->num_pbdma; ++i) 743 for (i = 0; i < f->num_pbdma; ++i)
@@ -614,6 +784,8 @@ clean_up:
614 f->pbdma_map = NULL; 784 f->pbdma_map = NULL;
615 kfree(f->engine_info); 785 kfree(f->engine_info);
616 f->engine_info = NULL; 786 f->engine_info = NULL;
787 kfree(f->active_engines_list);
788 f->active_engines_list = NULL;
617 789
618 return err; 790 return err;
619} 791}
@@ -829,9 +1001,29 @@ static inline void get_exception_mmu_fault_info(
829 1001
830void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id) 1002void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
831{ 1003{
1004 struct fifo_gk20a *f = NULL;
1005 u32 engine_enum = ENGINE_INVAL_GK20A;
1006 u32 inst_id = 0;
1007 struct fifo_engine_info_gk20a *engine_info;
1008
832 gk20a_dbg_fn(""); 1009 gk20a_dbg_fn("");
833 1010
834 if (engine_id == ENGINE_GR_GK20A) { 1011 if (!g)
1012 return;
1013
1014 f = &g->fifo;
1015
1016 engine_info = gk20a_fifo_get_engine_info(g, engine_id);
1017
1018 if (engine_info) {
1019 engine_enum = engine_info->engine_enum;
1020 inst_id = engine_info->inst_id;
1021 }
1022
1023 if (engine_enum == ENGINE_INVAL_GK20A)
1024 gk20a_err(dev_from_gk20a(g), "unsupported engine_id %d", engine_id);
1025
1026 if (engine_enum == ENGINE_GR_GK20A) {
835 if (support_gk20a_pmu(g->dev) && g->elpg_enabled) 1027 if (support_gk20a_pmu(g->dev) && g->elpg_enabled)
836 gk20a_pmu_disable_elpg(g); 1028 gk20a_pmu_disable_elpg(g);
837 /* resetting engine will alter read/write index. 1029 /* resetting engine will alter read/write index.
@@ -848,8 +1040,10 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
848 if (support_gk20a_pmu(g->dev) && g->elpg_enabled) 1040 if (support_gk20a_pmu(g->dev) && g->elpg_enabled)
849 gk20a_pmu_enable_elpg(g); 1041 gk20a_pmu_enable_elpg(g);
850 } 1042 }
851 if (engine_id == ENGINE_CE2_GK20A) 1043 if ((engine_enum == ENGINE_GRCE_GK20A) ||
852 gk20a_reset(g, mc_enable_ce2_m()); 1044 (engine_enum == ENGINE_ASYNC_CE_GK20A)) {
1045 gk20a_reset(g, engine_info->reset_mask);
1046 }
853} 1047}
854 1048
855static void gk20a_fifo_handle_chsw_fault(struct gk20a *g) 1049static void gk20a_fifo_handle_chsw_fault(struct gk20a *g)
@@ -872,6 +1066,24 @@ static void gk20a_fifo_handle_dropped_mmu_fault(struct gk20a *g)
872static bool gk20a_fifo_should_defer_engine_reset(struct gk20a *g, u32 engine_id, 1066static bool gk20a_fifo_should_defer_engine_reset(struct gk20a *g, u32 engine_id,
873 struct fifo_mmu_fault_info_gk20a *f, bool fake_fault) 1067 struct fifo_mmu_fault_info_gk20a *f, bool fake_fault)
874{ 1068{
1069 u32 engine_enum = ENGINE_INVAL_GK20A;
1070 struct fifo_gk20a *fifo = NULL;
1071 struct fifo_engine_info_gk20a *engine_info;
1072
1073 if (!g || !f)
1074 return false;
1075
1076 fifo = &g->fifo;
1077
1078 engine_info = gk20a_fifo_get_engine_info(g, engine_id);
1079
1080 if (engine_info) {
1081 engine_enum = engine_info->engine_enum;
1082 }
1083
1084 if (engine_enum == ENGINE_INVAL_GK20A)
1085 return false;
1086
875 /* channel recovery is only deferred if an sm debugger 1087 /* channel recovery is only deferred if an sm debugger
876 is attached and has MMU debug mode is enabled */ 1088 is attached and has MMU debug mode is enabled */
877 if (!gk20a_gr_sm_debugger_attached(g) || 1089 if (!gk20a_gr_sm_debugger_attached(g) ||
@@ -882,7 +1094,7 @@ static bool gk20a_fifo_should_defer_engine_reset(struct gk20a *g, u32 engine_id,
882 if (fake_fault) 1094 if (fake_fault)
883 return false; 1095 return false;
884 1096
885 if (engine_id != ENGINE_GR_GK20A || 1097 if (engine_enum != ENGINE_GR_GK20A ||
886 f->engine_subid_v != fifo_intr_mmu_fault_info_engine_subid_gpc_v()) 1098 f->engine_subid_v != fifo_intr_mmu_fault_info_engine_subid_gpc_v())
887 return false; 1099 return false;
888 1100
@@ -1001,8 +1213,8 @@ static bool gk20a_fifo_handle_mmu_fault(
1001 false); 1213 false);
1002 g->ops.clock_gating.slcg_ltc_load_gating_prod(g, 1214 g->ops.clock_gating.slcg_ltc_load_gating_prod(g,
1003 false); 1215 false);
1004 gr_gk20a_init_elcg_mode(g, ELCG_RUN, ENGINE_GR_GK20A); 1216
1005 gr_gk20a_init_elcg_mode(g, ELCG_RUN, ENGINE_CE2_GK20A); 1217 gr_gk20a_init_cg_mode(g, ELCG_MODE, ELCG_RUN);
1006 1218
1007 /* Disable fifo access */ 1219 /* Disable fifo access */
1008 grfifo_ctl = gk20a_readl(g, gr_gpfifo_ctl_r()); 1220 grfifo_ctl = gk20a_readl(g, gr_gpfifo_ctl_r());
@@ -1219,7 +1431,7 @@ static void gk20a_fifo_trigger_mmu_fault(struct gk20a *g,
1219 1431
1220 /* trigger faults for all bad engines */ 1432 /* trigger faults for all bad engines */
1221 for_each_set_bit(engine_id, &engine_ids, 32) { 1433 for_each_set_bit(engine_id, &engine_ids, 32) {
1222 if (engine_id > g->fifo.max_engines) { 1434 if (!gk20a_fifo_is_valid_engine_id(g, engine_id)) {
1223 WARN_ON(true); 1435 WARN_ON(true);
1224 break; 1436 break;
1225 } 1437 }
@@ -1257,8 +1469,9 @@ static u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg)
1257 int i; 1469 int i;
1258 u32 engines = 0; 1470 u32 engines = 0;
1259 1471
1260 for (i = 0; i < g->fifo.max_engines; i++) { 1472 for (i = 0; i < g->fifo.num_engines; i++) {
1261 u32 status = gk20a_readl(g, fifo_engine_status_r(i)); 1473 u32 active_engine_id = g->fifo.active_engines_list[i];
1474 u32 status = gk20a_readl(g, fifo_engine_status_r(active_engine_id));
1262 u32 ctx_status = 1475 u32 ctx_status =
1263 fifo_engine_status_ctx_status_v(status); 1476 fifo_engine_status_ctx_status_v(status);
1264 u32 ctx_id = (ctx_status == 1477 u32 ctx_id = (ctx_status ==
@@ -1276,7 +1489,7 @@ static u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg)
1276 fifo_engine_status_id_type_tsgid_v()) || 1489 fifo_engine_status_id_type_tsgid_v()) ||
1277 (!is_tsg && type == 1490 (!is_tsg && type ==
1278 fifo_engine_status_id_type_chid_v())) 1491 fifo_engine_status_id_type_chid_v()))
1279 engines |= BIT(i); 1492 engines |= BIT(active_engine_id);
1280 } 1493 }
1281 } 1494 }
1282 1495
@@ -1382,15 +1595,16 @@ void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids,
1382 ref_id_is_tsg = false; 1595 ref_id_is_tsg = false;
1383 /* Reset *all* engines that use the 1596 /* Reset *all* engines that use the
1384 * same channel as faulty engine */ 1597 * same channel as faulty engine */
1385 for (i = 0; i < g->fifo.max_engines; i++) { 1598 for (i = 0; i < g->fifo.num_engines; i++) {
1599 u32 active_engine_id = g->fifo.active_engines_list[i];
1386 u32 type; 1600 u32 type;
1387 u32 id; 1601 u32 id;
1388 1602
1389 gk20a_fifo_get_faulty_id_type(g, i, &id, &type); 1603 gk20a_fifo_get_faulty_id_type(g, active_engine_id, &id, &type);
1390 if (ref_type == type && ref_id == id) { 1604 if (ref_type == type && ref_id == id) {
1391 engine_ids |= BIT(i); 1605 engine_ids |= BIT(active_engine_id);
1392 mmu_fault_engines |= 1606 mmu_fault_engines |=
1393 BIT(gk20a_engine_id_to_mmu_id(g, i)); 1607 BIT(gk20a_engine_id_to_mmu_id(g, active_engine_id));
1394 } 1608 }
1395 } 1609 }
1396 } 1610 }
@@ -1453,16 +1667,21 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch, bool verbose)
1453u32 gk20a_fifo_get_failing_engine_data(struct gk20a *g, 1667u32 gk20a_fifo_get_failing_engine_data(struct gk20a *g,
1454 int *__id, bool *__is_tsg) 1668 int *__id, bool *__is_tsg)
1455{ 1669{
1456 u32 engine_id = -1; 1670 u32 engine_id;
1457 int id = -1; 1671 int id = -1;
1458 bool is_tsg = false; 1672 bool is_tsg = false;
1459 u32 mailbox2; 1673 u32 mailbox2;
1674 u32 active_engine_id = FIFO_INVAL_ENGINE_ID;
1460 1675
1461 for (engine_id = 0; engine_id < g->fifo.max_engines; engine_id++) { 1676 for (engine_id = 0; engine_id < g->fifo.num_engines; engine_id++) {
1462 u32 status = gk20a_readl(g, fifo_engine_status_r(engine_id)); 1677 u32 status;
1463 u32 ctx_status = fifo_engine_status_ctx_status_v(status); 1678 u32 ctx_status;
1464 bool failing_engine; 1679 bool failing_engine;
1465 1680
1681 active_engine_id = g->fifo.active_engines_list[engine_id];
1682 status = gk20a_readl(g, fifo_engine_status_r(active_engine_id));
1683 ctx_status = fifo_engine_status_ctx_status_v(status);
1684
1466 /* we are interested in busy engines */ 1685 /* we are interested in busy engines */
1467 failing_engine = fifo_engine_status_engine_v(status) == 1686 failing_engine = fifo_engine_status_engine_v(status) ==
1468 fifo_engine_status_engine_busy_v(); 1687 fifo_engine_status_engine_busy_v();
@@ -1476,8 +1695,10 @@ u32 gk20a_fifo_get_failing_engine_data(struct gk20a *g,
1476 || ctx_status == 1695 || ctx_status ==
1477 fifo_engine_status_ctx_status_ctxsw_load_v()); 1696 fifo_engine_status_ctx_status_ctxsw_load_v());
1478 1697
1479 if (!failing_engine) 1698 if (!failing_engine) {
1699 active_engine_id = FIFO_INVAL_ENGINE_ID;
1480 continue; 1700 continue;
1701 }
1481 1702
1482 if (ctx_status == 1703 if (ctx_status ==
1483 fifo_engine_status_ctx_status_ctxsw_load_v()) { 1704 fifo_engine_status_ctx_status_ctxsw_load_v()) {
@@ -1500,7 +1721,7 @@ u32 gk20a_fifo_get_failing_engine_data(struct gk20a *g,
1500 *__id = id; 1721 *__id = id;
1501 *__is_tsg = is_tsg; 1722 *__is_tsg = is_tsg;
1502 1723
1503 return engine_id; 1724 return active_engine_id;
1504} 1725}
1505 1726
1506static bool gk20a_fifo_handle_sched_error(struct gk20a *g) 1727static bool gk20a_fifo_handle_sched_error(struct gk20a *g)
@@ -1517,7 +1738,7 @@ static bool gk20a_fifo_handle_sched_error(struct gk20a *g)
1517 engine_id = gk20a_fifo_get_failing_engine_data(g, &id, &is_tsg); 1738 engine_id = gk20a_fifo_get_failing_engine_data(g, &id, &is_tsg);
1518 1739
1519 /* could not find the engine - should never happen */ 1740 /* could not find the engine - should never happen */
1520 if (unlikely(engine_id >= g->fifo.max_engines)) { 1741 if (!gk20a_fifo_is_valid_engine_id(g, engine_id)) {
1521 gk20a_err(dev_from_gk20a(g), "fifo sched error : 0x%08x, failed to find engine\n", 1742 gk20a_err(dev_from_gk20a(g), "fifo sched error : 0x%08x, failed to find engine\n",
1522 sched_error); 1743 sched_error);
1523 ret = false; 1744 ret = false;
@@ -1627,14 +1848,16 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
1627 "channel reset initiated from %s; intr=0x%08x", 1848 "channel reset initiated from %s; intr=0x%08x",
1628 __func__, fifo_intr); 1849 __func__, fifo_intr);
1629 for (engine_id = 0; 1850 for (engine_id = 0;
1630 engine_id < g->fifo.max_engines; 1851 engine_id < g->fifo.num_engines;
1631 engine_id++) { 1852 engine_id++) {
1632 gk20a_dbg_fn("enum:%d -> engine_id:%d", engine_id, 1853 u32 active_engine_id = g->fifo.active_engines_list[engine_id];
1633 g->fifo.engine_info[engine_id].engine_id); 1854 u32 engine_enum = g->fifo.engine_info[active_engine_id].engine_enum;
1634 fifo_pbdma_exception_status(g, 1855 gk20a_dbg_fn("enum:%d -> engine_id:%d", engine_enum,
1635 &g->fifo.engine_info[engine_id]); 1856 active_engine_id);
1636 fifo_engine_exception_status(g, 1857 fifo_pbdma_exception_status(g,
1637 &g->fifo.engine_info[engine_id]); 1858 &g->fifo.engine_info[active_engine_id]);
1859 fifo_engine_exception_status(g,
1860 &g->fifo.engine_info[active_engine_id]);
1638 } 1861 }
1639 } 1862 }
1640 1863
@@ -2057,12 +2280,13 @@ int gk20a_fifo_enable_all_engine_activity(struct gk20a *g)
2057 int i; 2280 int i;
2058 int err = 0, ret = 0; 2281 int err = 0, ret = 0;
2059 2282
2060 for (i = 0; i < g->fifo.max_engines; i++) { 2283 for (i = 0; i < g->fifo.num_engines; i++) {
2284 u32 active_engine_id = g->fifo.active_engines_list[i];
2061 err = gk20a_fifo_enable_engine_activity(g, 2285 err = gk20a_fifo_enable_engine_activity(g,
2062 &g->fifo.engine_info[i]); 2286 &g->fifo.engine_info[active_engine_id]);
2063 if (err) { 2287 if (err) {
2064 gk20a_err(dev_from_gk20a(g), 2288 gk20a_err(dev_from_gk20a(g),
2065 "failed to enable engine %d activity\n", i); 2289 "failed to enable engine %d activity\n", active_engine_id);
2066 ret = err; 2290 ret = err;
2067 } 2291 }
2068 } 2292 }
@@ -2149,14 +2373,16 @@ int gk20a_fifo_disable_all_engine_activity(struct gk20a *g,
2149{ 2373{
2150 int i; 2374 int i;
2151 int err = 0, ret = 0; 2375 int err = 0, ret = 0;
2376 u32 active_engine_id;
2152 2377
2153 for (i = 0; i < g->fifo.max_engines; i++) { 2378 for (i = 0; i < g->fifo.num_engines; i++) {
2379 active_engine_id = g->fifo.active_engines_list[i];
2154 err = gk20a_fifo_disable_engine_activity(g, 2380 err = gk20a_fifo_disable_engine_activity(g,
2155 &g->fifo.engine_info[i], 2381 &g->fifo.engine_info[active_engine_id],
2156 wait_for_idle); 2382 wait_for_idle);
2157 if (err) { 2383 if (err) {
2158 gk20a_err(dev_from_gk20a(g), 2384 gk20a_err(dev_from_gk20a(g),
2159 "failed to disable engine %d activity\n", i); 2385 "failed to disable engine %d activity\n", active_engine_id);
2160 ret = err; 2386 ret = err;
2161 break; 2387 break;
2162 } 2388 }
@@ -2164,11 +2390,12 @@ int gk20a_fifo_disable_all_engine_activity(struct gk20a *g,
2164 2390
2165 if (err) { 2391 if (err) {
2166 while (--i >= 0) { 2392 while (--i >= 0) {
2393 active_engine_id = g->fifo.active_engines_list[i];
2167 err = gk20a_fifo_enable_engine_activity(g, 2394 err = gk20a_fifo_enable_engine_activity(g,
2168 &g->fifo.engine_info[i]); 2395 &g->fifo.engine_info[active_engine_id]);
2169 if (err) 2396 if (err)
2170 gk20a_err(dev_from_gk20a(g), 2397 gk20a_err(dev_from_gk20a(g),
2171 "failed to re-enable engine %d activity\n", i); 2398 "failed to re-enable engine %d activity\n", active_engine_id);
2172 } 2399 }
2173 } 2400 }
2174 2401
@@ -2181,14 +2408,15 @@ static void gk20a_fifo_runlist_reset_engines(struct gk20a *g, u32 runlist_id)
2181 u32 engines = 0; 2408 u32 engines = 0;
2182 int i; 2409 int i;
2183 2410
2184 for (i = 0; i < f->max_engines; i++) { 2411 for (i = 0; i < f->num_engines; i++) {
2185 u32 status = gk20a_readl(g, fifo_engine_status_r(i)); 2412 u32 active_engine_id = g->fifo.active_engines_list[i];
2413 u32 status = gk20a_readl(g, fifo_engine_status_r(active_engine_id));
2186 bool engine_busy = fifo_engine_status_engine_v(status) == 2414 bool engine_busy = fifo_engine_status_engine_v(status) ==
2187 fifo_engine_status_engine_busy_v(); 2415 fifo_engine_status_engine_busy_v();
2188 2416
2189 if (engine_busy && 2417 if (engine_busy &&
2190 (f->engine_info[i].runlist_id == runlist_id)) 2418 (f->engine_info[active_engine_id].runlist_id == runlist_id))
2191 engines |= BIT(i); 2419 engines |= BIT(active_engine_id);
2192 } 2420 }
2193 2421
2194 if (engines) 2422 if (engines)
@@ -2669,8 +2897,10 @@ static int gk20a_fifo_sched_debugfs_seq_show(
2669 struct fifo_runlist_info_gk20a *runlist; 2897 struct fifo_runlist_info_gk20a *runlist;
2670 u32 runlist_id; 2898 u32 runlist_id;
2671 int ret = SEQ_SKIP; 2899 int ret = SEQ_SKIP;
2900 u32 engine_id;
2672 2901
2673 engine_info = f->engine_info + ENGINE_GR_GK20A; 2902 engine_id = gk20a_fifo_get_gr_engine_id(g);
2903 engine_info = (f->engine_info + engine_id);
2674 runlist_id = engine_info->runlist_id; 2904 runlist_id = engine_info->runlist_id;
2675 runlist = &f->runlist_info[runlist_id]; 2905 runlist = &f->runlist_info[runlist_id];
2676 2906
@@ -2772,6 +3002,7 @@ void gk20a_init_fifo(struct gpu_ops *gops)
2772 gops->fifo.set_runlist_interleave = gk20a_fifo_set_runlist_interleave; 3002 gops->fifo.set_runlist_interleave = gk20a_fifo_set_runlist_interleave;
2773 gops->fifo.force_reset_ch = gk20a_fifo_force_reset_ch; 3003 gops->fifo.force_reset_ch = gk20a_fifo_force_reset_ch;
2774 gops->fifo.engine_enum_from_type = gk20a_fifo_engine_enum_from_type; 3004 gops->fifo.engine_enum_from_type = gk20a_fifo_engine_enum_from_type;
2775 /* gk20a don't support device_info_data packet parsing */ 3005 /* gk20a doesn't support device_info_data packet parsing */
2776 gops->fifo.device_info_data_parse = NULL; 3006 gops->fifo.device_info_data_parse = NULL;
3007 gops->fifo.eng_runlist_base_size = fifo_eng_runlist_base__size_1_v;
2777} 3008}