author		Seema Khowala <seemaj@nvidia.com>	2017-03-03 15:36:16 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-03-23 20:18:34 -0400
commit		e1e059454a00ea28c5c9ab992eaf7f7c6780c3b8 (patch)
tree		6f8f57a3572fde87aadabef2c24844c1ac1fb7bb /drivers/gpu/nvgpu
parent		17df1921807a190d24dbd5b0e0f78192c2e3b772 (diff)
gpu: nvgpu: add function to enable/disable runlists sched
- gk20a_fifo_set_runlist_state() can be used to enable/disable the
  runlists scheduler. This will also be needed for t19x fifo recovery.
- Delete gk20a_fifo_disable_all_engine_activity() as it is not used
  anywhere.

JIRA GPUT19X-7

Change-Id: I6bb9a7574a473327f0e47060f32d52cd90551c6d
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: http://git-master/r/1315180
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.c	108
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.h	 13
2 files changed, 72 insertions, 49 deletions
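Before the diff itself, a minimal usage sketch of the API this patch adds. The wrapper below (example_pause_runlist) is hypothetical and not part of the change; only gk20a_fifo_set_runlist_state(), fifo_sched_disable_runlist_m() and the RUNLIST_* defines come from the patch. It assumes the caller does not already hold the per-runlist mutexes, so it passes !RUNLIST_INFO_MUTEX_LOCKED and lets the helper take them:

/* Hypothetical illustration, not part of this patch: pause scheduling for
 * one runlist, do some work, then resume it. This mirrors what the reworked
 * gk20a_fifo_disable/enable_engine_activity() now do internally.
 */
static void example_pause_runlist(struct gk20a *g, u32 runlist_id)
{
	u32 mask = fifo_sched_disable_runlist_m(runlist_id);

	/* caller does not hold runlist_info[runlist_id].mutex, so let the
	 * helper acquire and release it */
	gk20a_fifo_set_runlist_state(g, mask, RUNLIST_DISABLED,
			!RUNLIST_INFO_MUTEX_LOCKED);

	/* ... preempt or tear down work on this runlist ... */

	gk20a_fifo_set_runlist_state(g, mask, RUNLIST_ENABLED,
			!RUNLIST_INFO_MUTEX_LOCKED);
}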
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index ad69cd79..fb31c3fd 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -2596,25 +2596,76 @@ int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch)
 	return err;
 }
 
-int gk20a_fifo_enable_engine_activity(struct gk20a *g,
-			struct fifo_engine_info_gk20a *eng_info)
+static void gk20a_fifo_sched_disable_rw(struct gk20a *g, u32 runlists_mask,
+			u32 runlist_state)
+{
+	u32 reg_val;
+
+	reg_val = gk20a_readl(g, fifo_sched_disable_r());
+
+	if (runlist_state == RUNLIST_DISABLED)
+		reg_val |= runlists_mask;
+	else
+		reg_val &= (~runlists_mask);
+
+	gk20a_writel(g, fifo_sched_disable_r(), reg_val);
+
+}
+
+void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask,
+			u32 runlist_state,
+			int is_runlist_info_mutex_locked)
 {
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
 	u32 mutex_ret;
-	u32 enable;
+	u32 runlist_id;
 
 	gk20a_dbg_fn("");
 
+	if (!is_runlist_info_mutex_locked) {
+		gk20a_dbg_info("acquire runlist_info mutex");
+		for (runlist_id = 0; runlist_id < g->fifo.max_runlists;
+			runlist_id++) {
+			if (runlists_mask &
+				fifo_sched_disable_runlist_m(runlist_id))
+				nvgpu_mutex_acquire(&g->fifo.
+					runlist_info[runlist_id].mutex);
+		}
+	}
+
 	mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
-	enable = gk20a_readl(g, fifo_sched_disable_r());
-	enable &= ~(fifo_sched_disable_true_v() << eng_info->runlist_id);
-	gk20a_writel(g, fifo_sched_disable_r(), enable);
+	gk20a_fifo_sched_disable_rw(g, runlists_mask, runlist_state);
 
 	if (!mutex_ret)
 		pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
+	if (!is_runlist_info_mutex_locked) {
+		gk20a_dbg_info("release runlist_info mutex");
+		for (runlist_id = 0; runlist_id < g->fifo.max_runlists;
+			runlist_id++) {
+			if (runlists_mask &
+				fifo_sched_disable_runlist_m(runlist_id))
+
+				nvgpu_mutex_release(&g->fifo.
+					runlist_info[runlist_id].mutex);
+		}
+	}
+
+	gk20a_dbg_fn("done");
+}
+
+int gk20a_fifo_enable_engine_activity(struct gk20a *g,
+			struct fifo_engine_info_gk20a *eng_info)
+{
+	gk20a_dbg_fn("");
+
+	gk20a_fifo_set_runlist_state(g, fifo_sched_disable_runlist_m(
+			eng_info->runlist_id), RUNLIST_ENABLED,
+			!RUNLIST_INFO_MUTEX_LOCKED);
+
 	gk20a_dbg_fn("done");
+
 	return 0;
 }
 
@@ -2643,7 +2694,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
 {
 	u32 gr_stat, pbdma_stat, chan_stat, eng_stat, ctx_stat;
 	u32 pbdma_chid = FIFO_INVAL_CHANNEL_ID;
-	u32 engine_chid = FIFO_INVAL_CHANNEL_ID, disable;
+	u32 engine_chid = FIFO_INVAL_CHANNEL_ID;
 	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
 	u32 mutex_ret;
 	u32 err = 0;
@@ -2658,12 +2709,9 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
 
 	mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
-	disable = gk20a_readl(g, fifo_sched_disable_r());
-	disable = set_field(disable,
-			fifo_sched_disable_runlist_m(eng_info->runlist_id),
-			fifo_sched_disable_runlist_f(fifo_sched_disable_true_v(),
-				eng_info->runlist_id));
-	gk20a_writel(g, fifo_sched_disable_r(), disable);
+	gk20a_fifo_set_runlist_state(g, fifo_sched_disable_runlist_m(
+			eng_info->runlist_id), RUNLIST_DISABLED,
+			!RUNLIST_INFO_MUTEX_LOCKED);
 
 	/* chid from pbdma status */
 	pbdma_stat = gk20a_readl(g, fifo_pbdma_status_r(eng_info->pbdma_id));
@@ -2712,40 +2760,6 @@ clean_up:
 	return err;
 }
 
-int gk20a_fifo_disable_all_engine_activity(struct gk20a *g,
-				bool wait_for_idle)
-{
-	unsigned int i;
-	int err = 0, ret = 0;
-	u32 active_engine_id;
-
-	for (i = 0; i < g->fifo.num_engines; i++) {
-		active_engine_id = g->fifo.active_engines_list[i];
-		err = gk20a_fifo_disable_engine_activity(g,
-				&g->fifo.engine_info[active_engine_id],
-				wait_for_idle);
-		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"failed to disable engine %d activity\n", active_engine_id);
-			ret = err;
-			break;
-		}
-	}
-
-	if (err) {
-		while (i-- != 0) {
-			active_engine_id = g->fifo.active_engines_list[i];
-			err = gk20a_fifo_enable_engine_activity(g,
-					&g->fifo.engine_info[active_engine_id]);
-			if (err)
-				gk20a_err(dev_from_gk20a(g),
-					"failed to re-enable engine %d activity\n", active_engine_id);
-		}
-	}
-
-	return ret;
-}
-
 static void gk20a_fifo_runlist_reset_engines(struct gk20a *g, u32 runlist_id)
 {
 	struct fifo_gk20a *f = &g->fifo;
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
index 9793f37b..eab57ba3 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
@@ -47,6 +47,11 @@
 #define FIFO_PROFILING_ENTRIES	16384
 #endif
 
+#define RUNLIST_DISABLED		0
+#define RUNLIST_ENABLED			1
+
+#define RUNLIST_INFO_MUTEX_LOCKED	1
+
 /* generally corresponds to the "pbdma" engine */
 
 struct fifo_runlist_info_gk20a {
@@ -290,11 +295,13 @@ const char *gk20a_fifo_interleave_level_name(u32 interleave_level);
 int gk20a_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type,
 	u32 *inst_id);
 
-u32 gk20a_fifo_get_engine_ids(struct gk20a *g, u32 engine_id[], u32 engine_id_sz, u32 engine_enum);
+u32 gk20a_fifo_get_engine_ids(struct gk20a *g, u32 engine_id[],
+		u32 engine_id_sz, u32 engine_enum);
 
 void gk20a_fifo_delete_runlist(struct fifo_gk20a *f);
 
-struct fifo_engine_info_gk20a *gk20a_fifo_get_engine_info(struct gk20a *g, u32 engine_id);
+struct fifo_engine_info_gk20a *gk20a_fifo_get_engine_info(struct gk20a *g,
+	u32 engine_id);
 
 bool gk20a_fifo_is_valid_engine_id(struct gk20a *g, u32 engine_id);
 
@@ -317,6 +324,8 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f);
 
 void gk20a_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist);
 void gk20a_get_ch_runlist_entry(struct channel_gk20a *ch, u32 *runlist);
+void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask,
+		u32 runlist_state, int runlist_mutex_state);
 
 u32 gk20a_userd_gp_get(struct gk20a *g, struct channel_gk20a *c);
 void gk20a_userd_gp_put(struct gk20a *g, struct channel_gk20a *c);
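For completeness, a sketch of the already-locked path that RUNLIST_INFO_MUTEX_LOCKED is meant for, as a recovery-style caller (such as the t19x fifo recovery mentioned in the commit message) might use it. The function name example_recover_runlists is hypothetical; the identifiers it uses are the ones introduced or touched by this patch:

/* Hypothetical illustration only. A caller that already holds the
 * per-runlist mutexes passes RUNLIST_INFO_MUTEX_LOCKED so that
 * gk20a_fifo_set_runlist_state() skips its own acquire/release loops.
 */
static void example_recover_runlists(struct gk20a *g, u32 runlists_mask)
{
	u32 runlist_id;

	for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++)
		if (runlists_mask & fifo_sched_disable_runlist_m(runlist_id))
			nvgpu_mutex_acquire(
				&g->fifo.runlist_info[runlist_id].mutex);

	gk20a_fifo_set_runlist_state(g, runlists_mask, RUNLIST_DISABLED,
			RUNLIST_INFO_MUTEX_LOCKED);

	/* ... recovery work while runlist scheduling is off ... */

	gk20a_fifo_set_runlist_state(g, runlists_mask, RUNLIST_ENABLED,
			RUNLIST_INFO_MUTEX_LOCKED);

	for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++)
		if (runlists_mask & fifo_sched_disable_runlist_m(runlist_id))
			nvgpu_mutex_release(
				&g->fifo.runlist_info[runlist_id].mutex);
}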