author		Deepak Nibade <dnibade@nvidia.com>	2016-04-19 07:28:28 -0400
committer	Deepak Nibade <dnibade@nvidia.com>	2016-12-27 04:54:54 -0500
commit		6113c679a99ca09256d33a582a4dfe648e100c23 (patch)
tree		50a30eb1fc31daa10be5f382e99c8995b0df5978
parent		5237f4a2a143a6410cc2eac04a62511a637fd321 (diff)
gpu: nvgpu: API to set preemption mode
Separate out a new API gr_gp10b_set_ctxsw_preemption_mode() which will
check the requested preemption modes and take appropriate action for
each preemption mode. This API will also do some sanity checking for
valid preemption modes and combinations.

Define API set_preemption_mode() for gp10b which will set the
preemption modes passed as arguments and then use
gr_gp10b_set_ctxsw_preemption_mode() and update_ctxsw_preemption_mode()
to update the preemption mode.

The legacy path from gr_gp10b_alloc_gr_ctx() will convert the flags
NVGPU_ALLOC_OBJ_FLAGS_* into the appropriate preemption modes and then
call gr_gp10b_set_ctxsw_preemption_mode().

The new API set_preemption_mode() will use the new flags
NVGPU_GRAPHICS/COMPUTE_PREEMPTION_MODE_* to set and update the ctxsw
preemption mode.

In gr_gp10b_update_ctxsw_preemption_mode(), update the graphics context
to set CTA preemption mode if mode NVGPU_COMPUTE_PREEMPTION_MODE_CTA is
set.

Also, define the preemption modes in nvgpu-t18x.h and use them
everywhere. Remove the old definitions of the modes from gr_gp10b.h.

Bug 1646259

Change-Id: Ib4dc1fb9933b15d32f0122a9e52665b69402df18
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1131806
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
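For reference, a minimal standalone sketch of the sanity checking described
above, reconstructed from the diff below. Only the two mode flags added to
nvgpu-t18x.h by this change are used; everything else here is illustrative
and not part of the patch:

#include <errno.h>

/* Mode flags as added to include/uapi/linux/nvgpu-t18x.h by this change */
#define NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP	(1u << 1)	/* BIT(1) */
#define NVGPU_COMPUTE_PREEMPTION_MODE_CILP	(1u << 2)	/* BIT(2) */

/*
 * Sketch of the combination checks gr_gp10b_set_ctxsw_preemption_mode()
 * performs before acting on each requested mode (see the diff below).
 */
static int check_preemption_modes(unsigned int graphics_preempt_mode,
				  unsigned int compute_preempt_mode)
{
	/* at least one preemption mode must be requested */
	if (graphics_preempt_mode == 0 && compute_preempt_mode == 0)
		return -EINVAL;

	/* GFXP graphics preemption and CILP compute preemption
	 * cannot be combined */
	if (graphics_preempt_mode == NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP &&
	    compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CILP)
		return -EINVAL;

	return 0;
}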
-rw-r--r--	drivers/gpu/nvgpu/gp10b/gr_gp10b.c		| 204
-rw-r--r--	drivers/gpu/nvgpu/gp10b/gr_gp10b.h		|   3
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c	|   8
-rw-r--r--	include/uapi/linux/nvgpu-t18x.h			|   5
4 files changed, 171 insertions, 49 deletions
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
index e55c5768..a1a13a2b 100644
--- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
@@ -321,7 +321,7 @@ static int gr_gp10b_commit_global_cb_manager(struct gk20a *g,
 
 	gk20a_dbg_fn("");
 
-	if (gr_ctx->preempt_mode == NVGPU_GR_PREEMPTION_MODE_GFXP) {
+	if (gr_ctx->graphics_preempt_mode == NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP) {
 		attrib_size_in_chunk = gr->attrib_cb_default_size +
 			(gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v() -
 			 gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v());
@@ -798,29 +798,33 @@ fail_free:
 	return err;
 }
 
-static int gr_gp10b_alloc_gr_ctx(struct gk20a *g,
-		struct gr_ctx_desc **gr_ctx, struct vm_gk20a *vm,
-		u32 class,
-		u32 flags)
+static int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
+		struct gr_ctx_desc *gr_ctx,
+		struct vm_gk20a *vm, u32 class,
+		u32 graphics_preempt_mode,
+		u32 compute_preempt_mode)
 {
-	int err;
-
-	gk20a_dbg_fn("");
-
-	err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, vm, class, flags);
-	if (err)
-		return err;
-
-	(*gr_ctx)->t18x.ctx_id_valid = false;
+	int err = 0;
 
 	if (class == PASCAL_A && g->gr.t18x.ctx_vars.force_preemption_gfxp)
-		flags |= NVGPU_ALLOC_OBJ_FLAGS_GFXP;
+		graphics_preempt_mode = NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP;
 
 	if (class == PASCAL_COMPUTE_A &&
 			g->gr.t18x.ctx_vars.force_preemption_cilp)
-		flags |= NVGPU_ALLOC_OBJ_FLAGS_CILP;
+		compute_preempt_mode = NVGPU_COMPUTE_PREEMPTION_MODE_CILP;
+
+	/* check for invalid combinations */
+	if ((graphics_preempt_mode == 0) && (compute_preempt_mode == 0))
+		return -EINVAL;
 
-	if (flags & NVGPU_ALLOC_OBJ_FLAGS_GFXP) {
+	if ((graphics_preempt_mode == NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP) &&
+		   (compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CILP))
+		return -EINVAL;
+
+	/* set preemption modes */
+	switch (graphics_preempt_mode) {
+	case NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP:
+	{
 		u32 spill_size =
 			gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v() *
 			gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v();
@@ -838,62 +842,112 @@ static int gr_gp10b_alloc_gr_ctx(struct gk20a *g,
 		gk20a_dbg_info("gfxp context pagepool_size=%d", pagepool_size);
 		gk20a_dbg_info("gfxp context attrib_cb_size=%d",
 				attrib_cb_size);
+
 		err = gr_gp10b_alloc_buffer(vm,
 				g->gr.t18x.ctx_vars.preempt_image_size,
-				&(*gr_ctx)->t18x.preempt_ctxsw_buffer);
+				&gr_ctx->t18x.preempt_ctxsw_buffer);
 		if (err) {
-			gk20a_err(dev_from_gk20a(vm->mm->g),
+			gk20a_err(dev_from_gk20a(g),
 				  "cannot allocate preempt buffer");
-			goto fail_free_gk20a_ctx;
+			goto fail;
 		}
 
 		err = gr_gp10b_alloc_buffer(vm,
 				spill_size,
-				&(*gr_ctx)->t18x.spill_ctxsw_buffer);
+				&gr_ctx->t18x.spill_ctxsw_buffer);
 		if (err) {
-			gk20a_err(dev_from_gk20a(vm->mm->g),
+			gk20a_err(dev_from_gk20a(g),
 				  "cannot allocate spill buffer");
 			goto fail_free_preempt;
 		}
 
 		err = gr_gp10b_alloc_buffer(vm,
 				attrib_cb_size,
-				&(*gr_ctx)->t18x.betacb_ctxsw_buffer);
+				&gr_ctx->t18x.betacb_ctxsw_buffer);
 		if (err) {
-			gk20a_err(dev_from_gk20a(vm->mm->g),
+			gk20a_err(dev_from_gk20a(g),
 				  "cannot allocate beta buffer");
 			goto fail_free_spill;
 		}
 
 		err = gr_gp10b_alloc_buffer(vm,
 				pagepool_size,
-				&(*gr_ctx)->t18x.pagepool_ctxsw_buffer);
+				&gr_ctx->t18x.pagepool_ctxsw_buffer);
 		if (err) {
-			gk20a_err(dev_from_gk20a(vm->mm->g),
+			gk20a_err(dev_from_gk20a(g),
 				  "cannot allocate page pool");
 			goto fail_free_betacb;
 		}
 
-		(*gr_ctx)->preempt_mode = NVGPU_GR_PREEMPTION_MODE_GFXP;
+		gr_ctx->graphics_preempt_mode = graphics_preempt_mode;
+		break;
+	}
+
+	case NVGPU_GRAPHICS_PREEMPTION_MODE_WFI:
+		gr_ctx->graphics_preempt_mode = graphics_preempt_mode;
+		break;
+
+	default:
+		break;
 	}
 
 	if (class == PASCAL_COMPUTE_A) {
-		if (flags & NVGPU_ALLOC_OBJ_FLAGS_CILP)
-			(*gr_ctx)->preempt_mode = NVGPU_GR_PREEMPTION_MODE_CILP;
-		else
-			(*gr_ctx)->preempt_mode = NVGPU_GR_PREEMPTION_MODE_CTA;
+		switch (compute_preempt_mode) {
+		case NVGPU_COMPUTE_PREEMPTION_MODE_WFI:
+		case NVGPU_COMPUTE_PREEMPTION_MODE_CTA:
+		case NVGPU_COMPUTE_PREEMPTION_MODE_CILP:
+			gr_ctx->compute_preempt_mode = compute_preempt_mode;
+			break;
+		default:
+			break;
+		}
 	}
 
-	gk20a_dbg_fn("done");
-
-	return err;
+	return 0;
 
 fail_free_betacb:
-	gk20a_gmmu_unmap_free(vm, &(*gr_ctx)->t18x.betacb_ctxsw_buffer);
+	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
 fail_free_spill:
-	gk20a_gmmu_unmap_free(vm, &(*gr_ctx)->t18x.spill_ctxsw_buffer);
+	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
 fail_free_preempt:
-	gk20a_gmmu_unmap_free(vm, &(*gr_ctx)->t18x.preempt_ctxsw_buffer);
+	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
+fail:
+	return err;
+}
+
+static int gr_gp10b_alloc_gr_ctx(struct gk20a *g,
+		struct gr_ctx_desc **gr_ctx, struct vm_gk20a *vm,
+		u32 class,
+		u32 flags)
+{
+	int err;
+	u32 graphics_preempt_mode = 0;
+	u32 compute_preempt_mode = 0;
+
+	gk20a_dbg_fn("");
+
+	err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, vm, class, flags);
+	if (err)
+		return err;
+
+	(*gr_ctx)->t18x.ctx_id_valid = false;
+
+	if (flags & NVGPU_ALLOC_OBJ_FLAGS_GFXP)
+		graphics_preempt_mode = NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP;
+	if (flags & NVGPU_ALLOC_OBJ_FLAGS_CILP)
+		compute_preempt_mode = NVGPU_COMPUTE_PREEMPTION_MODE_CILP;
+
+	if (graphics_preempt_mode || compute_preempt_mode) {
+		err = gr_gp10b_set_ctxsw_preemption_mode(g, *gr_ctx, vm,
+			class, graphics_preempt_mode, compute_preempt_mode);
+		if (err)
+			goto fail_free_gk20a_ctx;
+	}
+
+	gk20a_dbg_fn("done");
+
+	return 0;
+
 fail_free_gk20a_ctx:
 	gr_gk20a_free_gr_ctx(g, vm, *gr_ctx);
 	*gr_ctx = NULL;
@@ -979,22 +1033,30 @@ static void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
 		ctxsw_prog_main_image_graphics_preemption_options_control_gfxp_f();
 	u32 cilp_preempt_option =
 		ctxsw_prog_main_image_compute_preemption_options_control_cilp_f();
+	u32 cta_preempt_option =
+		ctxsw_prog_main_image_compute_preemption_options_control_cta_f();
 	int err;
 
 	gk20a_dbg_fn("");
 
-	if (gr_ctx->preempt_mode == NVGPU_GR_PREEMPTION_MODE_GFXP) {
+	if (gr_ctx->graphics_preempt_mode == NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP) {
 		gk20a_dbg_info("GfxP: %x", gfxp_preempt_option);
 		gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_graphics_preemption_options_o(), 0,
 				gfxp_preempt_option);
 	}
 
-	if (gr_ctx->preempt_mode == NVGPU_GR_PREEMPTION_MODE_CILP) {
+	if (gr_ctx->compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CILP) {
 		gk20a_dbg_info("CILP: %x", cilp_preempt_option);
 		gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_compute_preemption_options_o(), 0,
 				cilp_preempt_option);
 	}
 
+	if (gr_ctx->compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CTA) {
+		gk20a_dbg_info("CTA: %x", cta_preempt_option);
+		gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_compute_preemption_options_o(), 0,
+				cta_preempt_option);
+	}
+
 	if (gr_ctx->t18x.preempt_ctxsw_buffer.gpu_va) {
 		u32 addr;
 		u32 size;
@@ -1547,8 +1609,8 @@ static int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
 		bool *early_exit, bool *ignore_debugger)
 {
 	int ret;
-	bool cilp_enabled = (fault_ch->ch_ctx.gr_ctx->preempt_mode ==
-			NVGPU_GR_PREEMPTION_MODE_CILP) ;
+	bool cilp_enabled = (fault_ch->ch_ctx.gr_ctx->compute_preempt_mode ==
+			NVGPU_COMPUTE_PREEMPTION_MODE_CILP) ;
 	u32 global_mask = 0, dbgr_control0, global_esr_copy;
 	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
 	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);
@@ -1763,7 +1825,7 @@ static bool gr_gp10b_suspend_context(struct channel_gk20a *ch,
 	if (gk20a_is_channel_ctx_resident(ch)) {
 		gk20a_suspend_all_sms(g, 0, false);
 
-		if (gr_ctx->preempt_mode == NVGPU_GR_PREEMPTION_MODE_CILP) {
+		if (gr_ctx->compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CILP) {
 			err = gr_gp10b_set_cilp_preempt_pending(g, ch);
 			if (err)
 				gk20a_err(dev_from_gk20a(g),
@@ -1852,6 +1914,63 @@ clean_up:
 	return err;
 }
 
+static int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
+				u32 graphics_preempt_mode,
+				u32 compute_preempt_mode)
+{
+	struct gr_ctx_desc *gr_ctx = ch->ch_ctx.gr_ctx;
+	struct channel_ctx_gk20a *ch_ctx = &ch->ch_ctx;
+	struct gk20a *g = ch->g;
+	struct tsg_gk20a *tsg;
+	struct vm_gk20a *vm;
+	void *ctx_ptr;
+	u32 class;
+	int err = 0;
+
+	class = ch->obj_class;
+	if (!class)
+		return -EINVAL;
+
+	/* preemption already set ? */
+	if (gr_ctx->graphics_preempt_mode || gr_ctx->compute_preempt_mode)
+		return -EINVAL;
+
+	if (gk20a_is_channel_marked_as_tsg(ch)) {
+		tsg = &g->fifo.tsg[ch->tsgid];
+		vm = tsg->vm;
+	} else {
+		vm = ch->vm;
+	}
+
+	err = gr_gp10b_set_ctxsw_preemption_mode(g, gr_ctx, vm, class,
+			graphics_preempt_mode, compute_preempt_mode);
+	if (err)
+		return err;
+
+	ctx_ptr = vmap(gr_ctx->mem.pages,
+			PAGE_ALIGN(ch_ctx->gr_ctx->mem.size) >> PAGE_SHIFT,
+			0, pgprot_writecombine(PAGE_KERNEL));
+	if (!ctx_ptr)
+		return -ENOMEM;
+
+	g->ops.fifo.disable_channel(ch);
+	err = g->ops.fifo.preempt_channel(g, ch->hw_chid);
+	if (err)
+		goto unmap_ctx;
+
+	if (g->ops.gr.update_ctxsw_preemption_mode) {
+		g->ops.gr.update_ctxsw_preemption_mode(ch->g, ch_ctx, ctx_ptr);
+		g->ops.gr.commit_global_cb_manager(g, ch, true);
+	}
+
+	g->ops.fifo.enable_channel(ch);
+
+unmap_ctx:
+	vunmap(ctx_ptr);
+
+	return err;
+}
+
 void gp10b_init_gr(struct gpu_ops *gops)
 {
 	gm20b_init_gr(gops);
@@ -1891,4 +2010,5 @@ void gp10b_init_gr(struct gpu_ops *gops)
 	gops->gr.create_gr_sysfs = gr_gp10b_create_sysfs;
 	gops->gr.get_lrf_tex_ltc_dram_override = get_ecc_override_val;
 	gops->gr.suspend_contexts = gr_gp10b_suspend_contexts;
+	gops->gr.set_preemption_mode = gr_gp10b_set_preemption_mode;
 }
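For context, a hedged sketch of how a caller inside the driver might exercise
the hook registered above. This is not part of the patch;
NVGPU_COMPUTE_PREEMPTION_MODE_CTA is used by the switch statement in this
change but its numeric definition is not shown in the uapi hunk below, so it
is assumed here:

/*
 * Hypothetical call site: request CTA compute preemption on a channel
 * via the new HAL hook. Passing 0 for the graphics mode is valid as
 * long as a compute mode is requested, per the combination checks in
 * gr_gp10b_set_ctxsw_preemption_mode().
 */
static int request_cta_preemption(struct channel_gk20a *ch)
{
	struct gk20a *g = ch->g;

	if (!g->ops.gr.set_preemption_mode)
		return -ENOSYS;

	return g->ops.gr.set_preemption_mode(ch, 0,
			NVGPU_COMPUTE_PREEMPTION_MODE_CTA);
}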
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.h b/drivers/gpu/nvgpu/gp10b/gr_gp10b.h
index edf536f5..8c544f14 100644
--- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.h
+++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.h
@@ -96,7 +96,4 @@ struct gr_ctx_desc_t18x {
 	bool cilp_preempt_pending;
 };
 
-#define NVGPU_GR_PREEMPTION_MODE_GFXP		1
-#define NVGPU_GR_PREEMPTION_MODE_CILP		3
-
 #endif
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
index 3023ef4b..08793e18 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
@@ -144,21 +144,21 @@ static int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
 			desc->gpu_va;
 		p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_BETACB] = desc->size;
 
-		gr_ctx->preempt_mode = NVGPU_GR_PREEMPTION_MODE_GFXP;
+		gr_ctx->graphics_preempt_mode = NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP;
 		p->mode = TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_GFX_GFXP;
 	}
 
 	if (class == PASCAL_COMPUTE_A) {
 		if (flags & NVGPU_ALLOC_OBJ_FLAGS_CILP) {
-			gr_ctx->preempt_mode = NVGPU_GR_PREEMPTION_MODE_CILP;
+			gr_ctx->compute_preempt_mode = NVGPU_COMPUTE_PREEMPTION_MODE_CILP;
 			p->mode = TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_COMPUTE_CILP;
 		} else {
-			gr_ctx->preempt_mode = NVGPU_GR_PREEMPTION_MODE_CTA;
+			gr_ctx->compute_preempt_mode = NVGPU_COMPUTE_PREEMPTION_MODE_CTA;
 			p->mode = TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_COMPUTE_CTA;
 		}
 	}
 
-	if (gr_ctx->preempt_mode) {
+	if (gr_ctx->graphics_preempt_mode || gr_ctx->compute_preempt_mode) {
 		msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTXSW_BUFFERS;
 		msg.handle = platform->virt_handle;
 		p->handle = gr_ctx->virt_ctx;
diff --git a/include/uapi/linux/nvgpu-t18x.h b/include/uapi/linux/nvgpu-t18x.h
index e2696b4c..1dce0c21 100644
--- a/include/uapi/linux/nvgpu-t18x.h
+++ b/include/uapi/linux/nvgpu-t18x.h
@@ -43,6 +43,11 @@
 #define NVGPU_ALLOC_OBJ_FLAGS_GFXP		(1 << 1)
 #define NVGPU_ALLOC_OBJ_FLAGS_CILP		(1 << 2)
 
+/* Flags in nvgpu_preemption_mode_args.graphics_preempt_flags */
+#define NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP	BIT(1)
+/* Flags in nvgpu_preemption_mode_args.compute_preempt_flags */
+#define NVGPU_COMPUTE_PREEMPTION_MODE_CILP	BIT(2)
+
 /* SM LRF ECC is enabled */
 #define NVGPU_GPU_FLAGS_ECC_ENABLED_SM_LRF	(1ULL << 60)
 /* SM SHM ECC is enabled */