author	Alex Waterman <alexw@nvidia.com>	2017-03-22 13:00:24 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-04-06 21:15:09 -0400
commit	633d331ae2db50fbcce829fe324c19fc44b82c24 (patch)
tree	ebf95d28c62a3bf81b68d44766459a343aeef62a /drivers/gpu/nvgpu
parent	2766420dfbe15e539a4b9514bbf41480fc636a28 (diff)
gpu: nvgpu: Rename gk20a_mem_* functions
Rename the functions used for mem_desc access to nvgpu_mem_*.

JIRA NVGPU-12

Change-Id: I5a1180c9a08d33c3dfc361ce8579c3c767fa5656
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1326193
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu')
-rw-r--r--	drivers/gpu/nvgpu/gv11b/fifo_gv11b.c	32
-rw-r--r--	drivers/gpu/nvgpu/gv11b/gr_gv11b.c	50
-rw-r--r--	drivers/gpu/nvgpu/gv11b/subctx_gv11b.c	24
3 files changed, 53 insertions(+), 53 deletions(-)
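For reference, a sketch of the renamed accessors, with signatures inferred from the call sites in this diff. The authoritative prototypes live in the nvgpu headers; argument names here are illustrative, and the offset-before-fill-value order for nvgpu_memset() is an assumption (both arguments are 0 at every call site in this change):

	/* Map/unmap a mem_desc for CPU access; *_begin() returns 0 on success. */
	int  nvgpu_mem_begin(struct gk20a *g, struct mem_desc *mem);
	void nvgpu_mem_end(struct gk20a *g, struct mem_desc *mem);

	/* 32-bit accessors: the _rd32/_wr32 variants take a word index
	 * (the ram_*_w() offsets below), _rd/_wr a byte offset (the
	 * ctxsw_prog_*_o() offsets). */
	u32  nvgpu_mem_rd32(struct gk20a *g, struct mem_desc *mem, u32 w);
	void nvgpu_mem_wr32(struct gk20a *g, struct mem_desc *mem, u32 w, u32 data);
	u32  nvgpu_mem_rd(struct gk20a *g, struct mem_desc *mem, u32 offset);
	void nvgpu_mem_wr(struct gk20a *g, struct mem_desc *mem, u32 offset, u32 data);

	/* Bulk variants: copy size bytes in from src, or fill size bytes with c. */
	void nvgpu_mem_wr_n(struct gk20a *g, struct mem_desc *mem, u32 offset,
			void *src, u32 size);
	void nvgpu_memset(struct gk20a *g, struct mem_desc *mem, u32 offset,
			u32 c, u32 size);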
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index 57fd24de..dd56a6f9 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -117,20 +117,20 @@ static int channel_gv11b_setup_ramfc(struct channel_gk20a *c,
 
 	gk20a_dbg_fn("");
 
-	gk20a_memset(g, mem, 0, 0, ram_fc_size_val_v());
+	nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v());
 
-	gk20a_mem_wr32(g, mem, ram_fc_gp_base_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_gp_base_w(),
 		pbdma_gp_base_offset_f(
 		u64_lo32(gpfifo_base >> pbdma_gp_base_rsvd_s())));
 
-	gk20a_mem_wr32(g, mem, ram_fc_gp_base_hi_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_gp_base_hi_w(),
 		pbdma_gp_base_hi_offset_f(u64_hi32(gpfifo_base)) |
 		pbdma_gp_base_hi_limit2_f(ilog2(gpfifo_entries)));
 
-	gk20a_mem_wr32(g, mem, ram_fc_signature_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_signature_w(),
 		c->g->ops.fifo.get_pbdma_signature(c->g));
 
-	gk20a_mem_wr32(g, mem, ram_fc_pb_header_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_pb_header_w(),
 		pbdma_pb_header_priv_user_f() |
 		pbdma_pb_header_method_zero_f() |
 		pbdma_pb_header_subchannel_zero_f() |
@@ -138,44 +138,44 @@ static int channel_gv11b_setup_ramfc(struct channel_gk20a *c,
 		pbdma_pb_header_first_true_f() |
 		pbdma_pb_header_type_inc_f());
 
-	gk20a_mem_wr32(g, mem, ram_fc_subdevice_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_subdevice_w(),
 		pbdma_subdevice_id_f(PBDMA_SUBDEVICE_ID) |
 		pbdma_subdevice_status_active_f() |
 		pbdma_subdevice_channel_dma_enable_f());
 
-	gk20a_mem_wr32(g, mem, ram_fc_target_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_target_w(),
 		pbdma_target_eng_ctx_valid_true_f() |
 		pbdma_target_ce_ctx_valid_true_f() |
 		pbdma_target_engine_sw_f());
 
-	gk20a_mem_wr32(g, mem, ram_fc_acquire_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_acquire_w(),
 		g->ops.fifo.pbdma_acquire_val(acquire_timeout));
 
-	gk20a_mem_wr32(g, mem, ram_fc_runlist_timeslice_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_runlist_timeslice_w(),
 		pbdma_runlist_timeslice_timeout_128_f() |
 		pbdma_runlist_timeslice_timescale_3_f() |
 		pbdma_runlist_timeslice_enable_true_f());
 
 
-	gk20a_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));
+	nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));
 
 	/* Until full subcontext is supported, always use VEID0 */
-	gk20a_mem_wr32(g, mem, ram_fc_set_channel_info_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_set_channel_info_w(),
 		pbdma_set_channel_info_scg_type_graphics_compute0_f() |
 		pbdma_set_channel_info_veid_f(CHANNEL_INFO_VEID0));
 
 	if (c->is_privileged_channel) {
 		/* Set privilege level for channel */
-		gk20a_mem_wr32(g, mem, ram_fc_config_w(),
+		nvgpu_mem_wr32(g, mem, ram_fc_config_w(),
 			pbdma_config_auth_level_privileged_f());
 
 		gk20a_fifo_setup_ramfc_for_privileged_channel(c);
 	}
 
 	/* Enable userd writeback */
-	data = gk20a_mem_rd32(g, mem, ram_fc_config_w());
+	data = nvgpu_mem_rd32(g, mem, ram_fc_config_w());
 	data = data | pbdma_config_userd_writeback_enable_f();
-	gk20a_mem_wr32(g, mem, ram_fc_config_w(),data);
+	nvgpu_mem_wr32(g, mem, ram_fc_config_w(),data);
 
 	gv11b_userd_writeback_config(g);
 
@@ -196,7 +196,7 @@ static u32 gv11b_userd_gp_get(struct gk20a *g, struct channel_gk20a *c)
 	struct mem_desc *userd_mem = &g->fifo.userd;
 	u32 offset = c->hw_chid * (g->fifo.userd_entry_size / sizeof(u32));
 
-	return gk20a_mem_rd32(g, userd_mem,
+	return nvgpu_mem_rd32(g, userd_mem,
 			offset + ram_userd_gp_get_w());
 }
 
@@ -205,7 +205,7 @@ static void gv11b_userd_gp_put(struct gk20a *g, struct channel_gk20a *c)
 	struct mem_desc *userd_mem = &g->fifo.userd;
 	u32 offset = c->hw_chid * (g->fifo.userd_entry_size / sizeof(u32));
 
-	gk20a_mem_wr32(g, userd_mem, offset + ram_userd_gp_put_w(),
+	nvgpu_mem_wr32(g, userd_mem, offset + ram_userd_gp_put_w(),
 		c->gpfifo.put);
 	/* commit everything to cpu */
 	smp_mb();
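A note on the USERD indexing in the two hunks above: all channels share one userd buffer, and each channel's entry is g->fifo.userd_entry_size bytes, so the byte size is divided by sizeof(u32) to get a word stride for the word-indexed 32-bit accessors. A minimal sketch, with a hypothetical entry size:

	/* Hypothetical numbers: with a 512-byte USERD entry, channel N's words
	 * start at N * (512 / 4) = N * 128 within the shared userd buffer. */
	u32 stride = g->fifo.userd_entry_size / sizeof(u32);
	u32 offset = c->hw_chid * stride;
	u32 gp_get = nvgpu_mem_rd32(g, &g->fifo.userd,
				offset + ram_userd_gp_get_w());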
diff --git a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
index d109dbf8..ddaaa350 100644
--- a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
@@ -780,36 +780,36 @@ static void dump_ctx_switch_stats(struct gk20a *g, struct vm_gk20a *vm,
 {
 	struct mem_desc *mem = &gr_ctx->mem;
 
-	if (gk20a_mem_begin(g, mem)) {
+	if (nvgpu_mem_begin(g, mem)) {
 		WARN_ON("Cannot map context");
 		return;
 	}
 	gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_magic_value_o : %x (expect %x)\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 				ctxsw_prog_main_image_magic_value_o()),
 		ctxsw_prog_main_image_magic_value_v_value_v());
 
 
 	gk20a_err(dev_from_gk20a(g), "NUM_SAVE_OPERATIONS : %d\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_num_save_ops_o()));
 	gk20a_err(dev_from_gk20a(g), "WFI_SAVE_OPERATIONS : %d\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_num_wfi_save_ops_o()));
 	gk20a_err(dev_from_gk20a(g), "CTA_SAVE_OPERATIONS : %d\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_num_cta_save_ops_o()));
 	gk20a_err(dev_from_gk20a(g), "GFXP_SAVE_OPERATIONS : %d\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_num_gfxp_save_ops_o()));
 	gk20a_err(dev_from_gk20a(g), "CILP_SAVE_OPERATIONS : %d\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_num_cilp_save_ops_o()));
 	gk20a_err(dev_from_gk20a(g),
 		"image gfx preemption option (GFXP is 1) %x\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_graphics_preemption_options_o()));
-	gk20a_mem_end(g, mem);
+	nvgpu_mem_end(g, mem);
 }
 
 static void gr_gv11b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
@@ -847,13 +847,13 @@ static void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
 
 	if (gr_ctx->graphics_preempt_mode == NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP) {
 		gk20a_dbg_info("GfxP: %x", gfxp_preempt_option);
-		gk20a_mem_wr(g, mem, ctxsw_prog_main_image_graphics_preemption_options_o(),
+		nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_graphics_preemption_options_o(),
 				gfxp_preempt_option);
 	}
 
 	if (gr_ctx->compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CILP) {
 		gk20a_dbg_info("CILP: %x", cilp_preempt_option);
-		gk20a_mem_wr(g, mem, ctxsw_prog_main_image_compute_preemption_options_o(),
+		nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_compute_preemption_options_o(),
 				cilp_preempt_option);
 	}
 
@@ -862,7 +862,7 @@ static void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
 		u32 size;
 		u32 cbes_reserve;
 
-		gk20a_mem_wr(g, mem, ctxsw_prog_main_image_full_preemption_ptr_o(),
+		nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_full_preemption_ptr_o(),
 			gr_ctx->t18x.preempt_ctxsw_buffer.gpu_va >> 8);
 
 		err = gr_gk20a_ctx_patch_write_begin(g, ch_ctx);
@@ -1858,12 +1858,12 @@ static int gr_gv11b_commit_inst(struct channel_gk20a *c, u64 gpu_va)
 	addr_hi = u64_hi32(ctx->mem.gpu_va);
 
 	/* point this address to engine_wfi_ptr */
-	gk20a_mem_wr32(c->g, &c->inst_block, ram_in_engine_wfi_target_w(),
+	nvgpu_mem_wr32(c->g, &c->inst_block, ram_in_engine_wfi_target_w(),
 		ram_in_engine_cs_wfi_v() |
 		ram_in_engine_wfi_mode_f(ram_in_engine_wfi_mode_virtual_v()) |
 		ram_in_engine_wfi_ptr_lo_f(addr_lo));
 
-	gk20a_mem_wr32(c->g, &c->inst_block, ram_in_engine_wfi_ptr_hi_w(),
+	nvgpu_mem_wr32(c->g, &c->inst_block, ram_in_engine_wfi_ptr_hi_w(),
 		ram_in_engine_wfi_ptr_hi_f(addr_hi));
 
 	return 0;
@@ -1930,20 +1930,20 @@ static void gv11b_restore_context_header(struct gk20a *g,
 	u32 va_lo, va_hi;
 	struct gr_gk20a *gr = &g->gr;
 
-	va_hi = gk20a_mem_rd(g, ctxheader,
+	va_hi = nvgpu_mem_rd(g, ctxheader,
 			ctxsw_prog_main_image_context_buffer_ptr_hi_o());
-	va_lo = gk20a_mem_rd(g, ctxheader,
+	va_lo = nvgpu_mem_rd(g, ctxheader,
 			ctxsw_prog_main_image_context_buffer_ptr_o());
-	gk20a_mem_wr_n(g, ctxheader, 0,
+	nvgpu_mem_wr_n(g, ctxheader, 0,
 			gr->ctx_vars.local_golden_image,
 			gr->ctx_vars.golden_image_size);
-	gk20a_mem_wr(g, ctxheader,
+	nvgpu_mem_wr(g, ctxheader,
 			ctxsw_prog_main_image_context_buffer_ptr_hi_o(), va_hi);
-	gk20a_mem_wr(g, ctxheader,
+	nvgpu_mem_wr(g, ctxheader,
 			ctxsw_prog_main_image_context_buffer_ptr_o(), va_lo);
-	gk20a_mem_wr(g, ctxheader,
+	nvgpu_mem_wr(g, ctxheader,
 			ctxsw_prog_main_image_num_restore_ops_o(), 0);
-	gk20a_mem_wr(g, ctxheader,
+	nvgpu_mem_wr(g, ctxheader,
 			ctxsw_prog_main_image_num_save_ops_o(), 0);
 }
 static void gr_gv11b_write_zcull_ptr(struct gk20a *g,
@@ -1954,9 +1954,9 @@ static void gr_gv11b_write_zcull_ptr(struct gk20a *g,
 	gpu_va = gpu_va >> 8;
 	va_lo = u64_lo32(gpu_va);
 	va_hi = u64_hi32(gpu_va);
-	gk20a_mem_wr(g, mem,
+	nvgpu_mem_wr(g, mem,
 		ctxsw_prog_main_image_zcull_ptr_o(), va_lo);
-	gk20a_mem_wr(g, mem,
+	nvgpu_mem_wr(g, mem,
 		ctxsw_prog_main_image_zcull_ptr_hi_o(), va_hi);
 }
 
@@ -1969,9 +1969,9 @@ static void gr_gv11b_write_pm_ptr(struct gk20a *g,
 	gpu_va = gpu_va >> 8;
 	va_lo = u64_lo32(gpu_va);
 	va_hi = u64_hi32(gpu_va);
-	gk20a_mem_wr(g, mem,
+	nvgpu_mem_wr(g, mem,
 		ctxsw_prog_main_image_pm_ptr_o(), va_lo);
-	gk20a_mem_wr(g, mem,
+	nvgpu_mem_wr(g, mem,
 		ctxsw_prog_main_image_pm_ptr_hi_o(), va_hi);
 }
 
diff --git a/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c b/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c
index 6344b5fb..8bf0631e 100644
--- a/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c
@@ -73,11 +73,11 @@ int gv11b_alloc_subctx_header(struct channel_gk20a *c)
 		return -ENOMEM;
 	}
 	/* Now clear the buffer */
-	if (gk20a_mem_begin(g, &ctx->mem))
+	if (nvgpu_mem_begin(g, &ctx->mem))
 		return -ENOMEM;
 
-	gk20a_memset(g, &ctx->mem, 0, 0, ctx->mem.size);
-	gk20a_mem_end(g, &ctx->mem);
+	nvgpu_memset(g, &ctx->mem, 0, 0, ctx->mem.size);
+	nvgpu_mem_end(g, &ctx->mem);
 
 	gv11b_init_subcontext_pdb(c, &c->inst_block);
 
@@ -111,14 +111,14 @@ static void gv11b_init_subcontext_pdb(struct channel_gk20a *c,
 		ram_in_sc_page_dir_base_lo_0_f(pdb_addr_lo);
 	lo = ram_in_sc_page_dir_base_vol_0_w();
 	hi = ram_in_sc_page_dir_base_hi_0_w();
-	gk20a_mem_wr32(g, inst_block, lo, format_word);
-	gk20a_mem_wr32(g, inst_block, hi, pdb_addr_hi);
+	nvgpu_mem_wr32(g, inst_block, lo, format_word);
+	nvgpu_mem_wr32(g, inst_block, hi, pdb_addr_hi);
 
 	/* make subcontext0 address space to valid */
 	/* TODO fix proper hw register definations */
-	gk20a_mem_wr32(g, inst_block, 166, 0x1);
-	gk20a_mem_wr32(g, inst_block, 167, 0);
-	gk20a_mem_wr32(g, inst_block, ram_in_engine_wfi_veid_w(),
+	nvgpu_mem_wr32(g, inst_block, 166, 0x1);
+	nvgpu_mem_wr32(g, inst_block, 167, 0);
+	nvgpu_mem_wr32(g, inst_block, ram_in_engine_wfi_veid_w(),
 		ram_in_engine_wfi_veid_f(0));
 
 }
@@ -136,13 +136,13 @@ int gv11b_update_subctx_header(struct channel_gk20a *c, u64 gpu_va)
 
 	gr_mem = &ctx->mem;
 	g->ops.mm.l2_flush(g, true);
-	if (gk20a_mem_begin(g, gr_mem))
+	if (nvgpu_mem_begin(g, gr_mem))
 		return -ENOMEM;
 
-	gk20a_mem_wr(g, gr_mem,
+	nvgpu_mem_wr(g, gr_mem,
 		ctxsw_prog_main_image_context_buffer_ptr_hi_o(), addr_hi);
-	gk20a_mem_wr(g, gr_mem,
+	nvgpu_mem_wr(g, gr_mem,
 		ctxsw_prog_main_image_context_buffer_ptr_o(), addr_lo);
-	gk20a_mem_end(g, gr_mem);
+	nvgpu_mem_end(g, gr_mem);
 	return ret;
 }
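The subctx_gv11b.c hunks above all follow the same map/write/unmap pattern. A minimal standalone sketch of that pattern, assuming the accessor semantics inferred earlier (the function name here is hypothetical):

	static int example_update_ctx_buffer_ptr(struct gk20a *g,
			struct mem_desc *mem, u64 gpu_va)
	{
		u32 addr_lo = u64_lo32(gpu_va);
		u32 addr_hi = u64_hi32(gpu_va);

		/* Map the buffer for CPU access; bail out as the driver does. */
		if (nvgpu_mem_begin(g, mem))
			return -ENOMEM;

		/* Writes go through the CPU mapping set up above. */
		nvgpu_mem_wr(g, mem,
			ctxsw_prog_main_image_context_buffer_ptr_hi_o(), addr_hi);
		nvgpu_mem_wr(g, mem,
			ctxsw_prog_main_image_context_buffer_ptr_o(), addr_lo);

		/* Unmap again once the writes are done. */
		nvgpu_mem_end(g, mem);
		return 0;
	}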