summary | refs | log | tree | commit | diff | stats
path: root/drivers
diff options
context:
space:
mode:
authorKonsta Holtta <kholtta@nvidia.com>2016-05-06 07:34:07 -0400
committerKen Adams <kadams@nvidia.com>2016-05-13 10:12:04 -0400
commitd089e402355f3533b18a50a4e9fe7423593762af (patch)
tree89abdb8b97785e4cef47244d182b1e7faae2d291 /drivers
parentc8b6a331d1e30595c5798fc3121575c1ab21e2ae (diff)
gpu: nvgpu: refactor gk20a_mem_{wr,rd} for vidmem
To support vidmem, pass g and mem_desc to the buffer memory accessor functions. This allows the functions to select the memory access method based on the buffer aperture instead of using the cpu pointer directly (like until now). The selection and aperture support will be in another patch; this patch only refactors these accessors, but keeps the underlying functionality as-is. JIRA DNVGPU-23 Change-Id: Ie2cc17c4a0315d03a66e92fb635c217840d5399e Signed-off-by: Konsta Holtta <kholtta@nvidia.com> Reviewed-on: http://git-master/r/1128863 GVS: Gerrit_Virtual_Submit Reviewed-by: Ken Adams <kadams@nvidia.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/nvgpu/gv11b/gr_gv11b.c61
1 file changed, 30 insertions, 31 deletions
diff --git a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
index 5dee0921..64bfa773 100644
--- a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
@@ -897,52 +897,51 @@ fail_free_gk20a_ctx:
897} 897}
898 898
899static void dump_ctx_switch_stats(struct gk20a *g, struct vm_gk20a *vm, 899static void dump_ctx_switch_stats(struct gk20a *g, struct vm_gk20a *vm,
900 struct gr_ctx_desc *gr_ctx) { 900 struct gr_ctx_desc *gr_ctx)
901 void *ctx_ptr = vmap(gr_ctx->mem.pages, 901{
902 PAGE_ALIGN(gr_ctx->mem.size) >> PAGE_SHIFT, 902 struct mem_desc *mem = &gr_ctx->mem;
903 0, pgprot_writecombine(PAGE_KERNEL)); 903
904 if (!ctx_ptr) { 904 if (gk20a_mem_begin(g, mem)) {
905 WARN_ON("Cannot map context"); 905 WARN_ON("Cannot map context");
906 return; 906 return;
907 } 907 }
908 gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_magic_value_o : %x (expect %x)\n", 908 gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_magic_value_o : %x (expect %x)\n",
909 gk20a_mem_rd32(ctx_ptr + 909 gk20a_mem_rd(g, mem,
910 ctxsw_prog_main_image_magic_value_o(), 0), 910 ctxsw_prog_main_image_magic_value_o()),
911 ctxsw_prog_main_image_magic_value_v_value_v()); 911 ctxsw_prog_main_image_magic_value_v_value_v());
912 912
913 gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi : %x\n", 913 gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi : %x\n",
914 gk20a_mem_rd32(ctx_ptr + 914 gk20a_mem_rd(g, mem,
915 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_o(), 0)); 915 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_o()));
916 916
917 gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_ptr : %x\n", 917 gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_ptr : %x\n",
918 gk20a_mem_rd32(ctx_ptr + 918 gk20a_mem_rd(g, mem,
919 ctxsw_prog_main_image_context_timestamp_buffer_ptr_o(), 0)); 919 ctxsw_prog_main_image_context_timestamp_buffer_ptr_o()));
920 920
921 gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_control : %x\n", 921 gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_control : %x\n",
922 gk20a_mem_rd32(ctx_ptr + 922 gk20a_mem_rd(g, mem,
923 ctxsw_prog_main_image_context_timestamp_buffer_control_o(), 0)); 923 ctxsw_prog_main_image_context_timestamp_buffer_control_o()));
924 924
925 gk20a_err(dev_from_gk20a(g), "NUM_SAVE_OPERATIONS : %d\n", 925 gk20a_err(dev_from_gk20a(g), "NUM_SAVE_OPERATIONS : %d\n",
926 gk20a_mem_rd32(ctx_ptr + 926 gk20a_mem_rd(g, mem,
927 ctxsw_prog_main_image_num_save_ops_o(), 0)); 927 ctxsw_prog_main_image_num_save_ops_o()));
928 gk20a_err(dev_from_gk20a(g), "WFI_SAVE_OPERATIONS : %d\n", 928 gk20a_err(dev_from_gk20a(g), "WFI_SAVE_OPERATIONS : %d\n",
929 gk20a_mem_rd32(ctx_ptr + 929 gk20a_mem_rd(g, mem,
930 ctxsw_prog_main_image_num_wfi_save_ops_o(), 0)); 930 ctxsw_prog_main_image_num_wfi_save_ops_o()));
931 gk20a_err(dev_from_gk20a(g), "CTA_SAVE_OPERATIONS : %d\n", 931 gk20a_err(dev_from_gk20a(g), "CTA_SAVE_OPERATIONS : %d\n",
932 gk20a_mem_rd32(ctx_ptr + 932 gk20a_mem_rd(g, mem,
933 ctxsw_prog_main_image_num_cta_save_ops_o(), 0)); 933 ctxsw_prog_main_image_num_cta_save_ops_o()));
934 gk20a_err(dev_from_gk20a(g), "GFXP_SAVE_OPERATIONS : %d\n", 934 gk20a_err(dev_from_gk20a(g), "GFXP_SAVE_OPERATIONS : %d\n",
935 gk20a_mem_rd32(ctx_ptr + 935 gk20a_mem_rd(g, mem,
936 ctxsw_prog_main_image_num_gfxp_save_ops_o(), 0)); 936 ctxsw_prog_main_image_num_gfxp_save_ops_o()));
937 gk20a_err(dev_from_gk20a(g), "CILP_SAVE_OPERATIONS : %d\n", 937 gk20a_err(dev_from_gk20a(g), "CILP_SAVE_OPERATIONS : %d\n",
938 gk20a_mem_rd32(ctx_ptr + 938 gk20a_mem_rd(g, mem,
939 ctxsw_prog_main_image_num_cilp_save_ops_o(), 0)); 939 ctxsw_prog_main_image_num_cilp_save_ops_o()));
940 gk20a_err(dev_from_gk20a(g), 940 gk20a_err(dev_from_gk20a(g),
941 "image gfx preemption option (GFXP is 1) %x\n", 941 "image gfx preemption option (GFXP is 1) %x\n",
942 gk20a_mem_rd32(ctx_ptr + 942 gk20a_mem_rd(g, mem,
943 ctxsw_prog_main_image_graphics_preemption_options_o(), 943 ctxsw_prog_main_image_graphics_preemption_options_o()));
944 0)); 944 gk20a_mem_end(g, mem);
945 vunmap(ctx_ptr);
946} 945}
947 946
948static void gr_gv11b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm, 947static void gr_gv11b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
@@ -967,7 +966,7 @@ static void gr_gv11b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
967 966
968static void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g, 967static void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
969 struct channel_ctx_gk20a *ch_ctx, 968 struct channel_ctx_gk20a *ch_ctx,
970 void *ctx_ptr) 969 struct mem_desc *mem)
971{ 970{
972 struct gr_ctx_desc *gr_ctx = ch_ctx->gr_ctx; 971 struct gr_ctx_desc *gr_ctx = ch_ctx->gr_ctx;
973 u32 gfxp_preempt_option = 972 u32 gfxp_preempt_option =
@@ -980,13 +979,13 @@ static void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
980 979
981 if (gr_ctx->graphics_preempt_mode == NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP) { 980 if (gr_ctx->graphics_preempt_mode == NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP) {
982 gk20a_dbg_info("GfxP: %x", gfxp_preempt_option); 981 gk20a_dbg_info("GfxP: %x", gfxp_preempt_option);
983 gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_graphics_preemption_options_o(), 0, 982 gk20a_mem_wr(g, mem, ctxsw_prog_main_image_graphics_preemption_options_o(),
984 gfxp_preempt_option); 983 gfxp_preempt_option);
985 } 984 }
986 985
987 if (gr_ctx->compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CILP) { 986 if (gr_ctx->compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CILP) {
988 gk20a_dbg_info("CILP: %x", cilp_preempt_option); 987 gk20a_dbg_info("CILP: %x", cilp_preempt_option);
989 gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_compute_preemption_options_o(), 0, 988 gk20a_mem_wr(g, mem, ctxsw_prog_main_image_compute_preemption_options_o(),
990 cilp_preempt_option); 989 cilp_preempt_option);
991 } 990 }
992 991
@@ -995,7 +994,7 @@ static void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
995 u32 size; 994 u32 size;
996 u32 cbes_reserve; 995 u32 cbes_reserve;
997 996
998 gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_full_preemption_ptr_o(), 0, 997 gk20a_mem_wr(g, mem, ctxsw_prog_main_image_full_preemption_ptr_o(),
999 gr_ctx->t18x.preempt_ctxsw_buffer.gpu_va >> 8); 998 gr_ctx->t18x.preempt_ctxsw_buffer.gpu_va >> 8);
1000 999
1001 err = gr_gk20a_ctx_patch_write_begin(g, ch_ctx); 1000 err = gr_gk20a_ctx_patch_write_begin(g, ch_ctx);