diff options
author | Terje Bergstrom <tbergstrom@nvidia.com> | 2014-10-01 08:15:44 -0400 |
---|---|---|
committer | Dan Willemsen <dwillemsen@nvidia.com> | 2015-03-18 15:11:35 -0400 |
commit | e58fddb0d5ec611e606e6125070253219f2da13e (patch) | |
tree | f2ff4a6c3c760ae1ee2bba3f46cb549f4cbc63bb | |
parent | e98ac1867deb5acf008b8400ea78e81986719df7 (diff) |
gpu: nvgpu: Do not reset ctxsw & wait for fe_gi
At this stage, ctxsw is always in reset state, because we're powering GPU
up, or we have reset the whole GR partition. Remove the code to invoke a
second reset.
Fix the wait for FE idle: wait after each bundle is written inside the loop, and break out of the loop if any iteration fails.
Change-Id: I0846f67c6d860a485dea62ff870deafe55a47365
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/552799
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/gr_gk20a.c | 108 |
1 files changed, 5 insertions, 103 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c index ca7294fd..dde57494 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c | |||
@@ -352,104 +352,6 @@ static int gr_gk20a_wait_fe_idle(struct gk20a *g, unsigned long end_jiffies, | |||
352 | 352 | ||
353 | return -EAGAIN; | 353 | return -EAGAIN; |
354 | } | 354 | } |
355 | static int gr_gk20a_ctx_reset(struct gk20a *g, u32 rst_mask) | ||
356 | { | ||
357 | u32 delay = GR_IDLE_CHECK_DEFAULT; | ||
358 | unsigned long end_jiffies = jiffies + | ||
359 | msecs_to_jiffies(gk20a_get_gr_idle_timeout(g)); | ||
360 | u32 reg; | ||
361 | |||
362 | gk20a_dbg_fn(""); | ||
363 | |||
364 | if (!tegra_platform_is_linsim()) { | ||
365 | /* Force clocks on */ | ||
366 | gk20a_writel(g, gr_fe_pwr_mode_r(), | ||
367 | gr_fe_pwr_mode_req_send_f() | | ||
368 | gr_fe_pwr_mode_mode_force_on_f()); | ||
369 | |||
370 | /* Wait for the clocks to indicate that they are on */ | ||
371 | do { | ||
372 | reg = gk20a_readl(g, gr_fe_pwr_mode_r()); | ||
373 | |||
374 | if (gr_fe_pwr_mode_req_v(reg) == | ||
375 | gr_fe_pwr_mode_req_done_v()) | ||
376 | break; | ||
377 | |||
378 | usleep_range(delay, delay * 2); | ||
379 | delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); | ||
380 | |||
381 | } while (time_before(jiffies, end_jiffies)); | ||
382 | |||
383 | if (!time_before(jiffies, end_jiffies)) { | ||
384 | gk20a_err(dev_from_gk20a(g), | ||
385 | "failed to force the clocks on\n"); | ||
386 | WARN_ON(1); | ||
387 | } | ||
388 | } | ||
389 | if (rst_mask) { | ||
390 | gk20a_writel(g, gr_fecs_ctxsw_reset_ctl_r(), rst_mask); | ||
391 | } else { | ||
392 | gk20a_writel(g, gr_fecs_ctxsw_reset_ctl_r(), | ||
393 | gr_fecs_ctxsw_reset_ctl_sys_halt_disabled_f() | | ||
394 | gr_fecs_ctxsw_reset_ctl_gpc_halt_disabled_f() | | ||
395 | gr_fecs_ctxsw_reset_ctl_be_halt_disabled_f() | | ||
396 | gr_fecs_ctxsw_reset_ctl_sys_engine_reset_disabled_f() | | ||
397 | gr_fecs_ctxsw_reset_ctl_gpc_engine_reset_disabled_f() | | ||
398 | gr_fecs_ctxsw_reset_ctl_be_engine_reset_disabled_f() | | ||
399 | gr_fecs_ctxsw_reset_ctl_sys_context_reset_enabled_f() | | ||
400 | gr_fecs_ctxsw_reset_ctl_gpc_context_reset_enabled_f() | | ||
401 | gr_fecs_ctxsw_reset_ctl_be_context_reset_enabled_f()); | ||
402 | } | ||
403 | |||
404 | /* we need to read the reset register *and* wait for a moment to ensure | ||
405 | * reset propagation */ | ||
406 | |||
407 | gk20a_readl(g, gr_fecs_ctxsw_reset_ctl_r()); | ||
408 | udelay(20); | ||
409 | |||
410 | gk20a_writel(g, gr_fecs_ctxsw_reset_ctl_r(), | ||
411 | gr_fecs_ctxsw_reset_ctl_sys_halt_disabled_f() | | ||
412 | gr_fecs_ctxsw_reset_ctl_gpc_halt_disabled_f() | | ||
413 | gr_fecs_ctxsw_reset_ctl_be_halt_disabled_f() | | ||
414 | gr_fecs_ctxsw_reset_ctl_sys_engine_reset_disabled_f() | | ||
415 | gr_fecs_ctxsw_reset_ctl_gpc_engine_reset_disabled_f() | | ||
416 | gr_fecs_ctxsw_reset_ctl_be_engine_reset_disabled_f() | | ||
417 | gr_fecs_ctxsw_reset_ctl_sys_context_reset_disabled_f() | | ||
418 | gr_fecs_ctxsw_reset_ctl_gpc_context_reset_disabled_f() | | ||
419 | gr_fecs_ctxsw_reset_ctl_be_context_reset_disabled_f()); | ||
420 | |||
421 | /* we need to readl the reset and then wait a small moment after that */ | ||
422 | gk20a_readl(g, gr_fecs_ctxsw_reset_ctl_r()); | ||
423 | udelay(20); | ||
424 | |||
425 | if (!tegra_platform_is_linsim()) { | ||
426 | /* Set power mode back to auto */ | ||
427 | gk20a_writel(g, gr_fe_pwr_mode_r(), | ||
428 | gr_fe_pwr_mode_req_send_f() | | ||
429 | gr_fe_pwr_mode_mode_auto_f()); | ||
430 | |||
431 | /* Wait for the request to complete */ | ||
432 | end_jiffies = jiffies + | ||
433 | msecs_to_jiffies(gk20a_get_gr_idle_timeout(g)); | ||
434 | do { | ||
435 | reg = gk20a_readl(g, gr_fe_pwr_mode_r()); | ||
436 | |||
437 | if (gr_fe_pwr_mode_req_v(reg) == | ||
438 | gr_fe_pwr_mode_req_done_v()) | ||
439 | break; | ||
440 | |||
441 | usleep_range(delay, delay * 2); | ||
442 | delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); | ||
443 | |||
444 | } while (time_before(jiffies, end_jiffies)); | ||
445 | |||
446 | if (!time_before(jiffies, end_jiffies)) | ||
447 | gk20a_warn(dev_from_gk20a(g), | ||
448 | "failed to set power mode to auto\n"); | ||
449 | } | ||
450 | |||
451 | return 0; | ||
452 | } | ||
453 | 355 | ||
454 | static int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id, | 356 | static int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id, |
455 | u32 *mailbox_ret, u32 opc_success, | 357 | u32 *mailbox_ret, u32 opc_success, |
@@ -1499,8 +1401,6 @@ static u32 gk20a_init_sw_bundle(struct gk20a *g) | |||
1499 | 1401 | ||
1500 | /* load bundle init */ | 1402 | /* load bundle init */ |
1501 | for (i = 0; i < sw_bundle_init->count; i++) { | 1403 | for (i = 0; i < sw_bundle_init->count; i++) { |
1502 | err |= gr_gk20a_wait_fe_idle(g, end_jiffies, | ||
1503 | GR_IDLE_CHECK_DEFAULT); | ||
1504 | if (i == 0 || last_bundle_data != sw_bundle_init->l[i].value) { | 1404 | if (i == 0 || last_bundle_data != sw_bundle_init->l[i].value) { |
1505 | gk20a_writel(g, gr_pipe_bundle_data_r(), | 1405 | gk20a_writel(g, gr_pipe_bundle_data_r(), |
1506 | sw_bundle_init->l[i].value); | 1406 | sw_bundle_init->l[i].value); |
@@ -1514,6 +1414,11 @@ static u32 gk20a_init_sw_bundle(struct gk20a *g) | |||
1514 | GR_GO_IDLE_BUNDLE) | 1414 | GR_GO_IDLE_BUNDLE) |
1515 | err |= gr_gk20a_wait_idle(g, end_jiffies, | 1415 | err |= gr_gk20a_wait_idle(g, end_jiffies, |
1516 | GR_IDLE_CHECK_DEFAULT); | 1416 | GR_IDLE_CHECK_DEFAULT); |
1417 | |||
1418 | err = gr_gk20a_wait_fe_idle(g, end_jiffies, | ||
1419 | GR_IDLE_CHECK_DEFAULT); | ||
1420 | if (err) | ||
1421 | break; | ||
1517 | } | 1422 | } |
1518 | 1423 | ||
1519 | /* disable pipe mode override */ | 1424 | /* disable pipe mode override */ |
@@ -4516,9 +4421,6 @@ int gk20a_init_gr_reset_enable_hw(struct gk20a *g) | |||
4516 | gk20a_writel(g, gr_intr_r(), ~0); | 4421 | gk20a_writel(g, gr_intr_r(), ~0); |
4517 | gk20a_writel(g, gr_intr_en_r(), ~0); | 4422 | gk20a_writel(g, gr_intr_en_r(), ~0); |
4518 | 4423 | ||
4519 | /* reset ctx switch state */ | ||
4520 | gr_gk20a_ctx_reset(g, 0); | ||
4521 | |||
4522 | /* clear scc ram */ | 4424 | /* clear scc ram */ |
4523 | gk20a_writel(g, gr_scc_init_r(), | 4425 | gk20a_writel(g, gr_scc_init_r(), |
4524 | gr_scc_init_ram_trigger_f()); | 4426 | gr_scc_init_ram_trigger_f()); |