author     Alex Waterman <alexw@nvidia.com>    2017-03-15 19:42:12 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>    2017-04-06 21:14:48 -0400
commit     b69020bff5dfa69cad926c9374cdbe9a62509ffd
tree       222f6b6bc23561a38004a257cbac401e431ff3be /drivers/gpu/nvgpu/gp10b
parent     fa4ecf5730a75269e85cc41c2ad2ee61307e72a9
gpu: nvgpu: Rename gk20a_mem_* functions
Rename the functions used for mem_desc access to nvgpu_mem_*.

JIRA NVGPU-12

Change-Id: Ibfdc1112d43f0a125e4487c250e3f977ffd2cd75
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1323325
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
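The change is a mechanical rename at every call site: each gk20a_mem_* / gk20a_memset / gk20a_aperture_mask call becomes its nvgpu_-prefixed equivalent with identical arguments. As a hedged sketch (not part of this patch), the mapping can be pictured as thin forwarding wrappers; the prototypes below are inferred from the call sites in this diff and are assumptions, not the authoritative nvgpu headers.

/*
 * Illustrative only -- NOT from this patch. Prototypes are inferred
 * from the call sites in the diff below; the real declarations live
 * in the nvgpu headers and may differ.
 */
#include <linux/types.h>

struct gk20a;
struct mem_desc;

u32  nvgpu_mem_rd(struct gk20a *g, struct mem_desc *mem, u32 offset);
u32  nvgpu_mem_rd32(struct gk20a *g, struct mem_desc *mem, u32 w);
void nvgpu_mem_wr(struct gk20a *g, struct mem_desc *mem, u32 offset, u32 data);
void nvgpu_mem_wr32(struct gk20a *g, struct mem_desc *mem, u32 w, u32 data);
void nvgpu_memset(struct gk20a *g, struct mem_desc *mem, u32 offset,
		  u32 c, u32 size);
int  nvgpu_mem_begin(struct gk20a *g, struct mem_desc *mem);
void nvgpu_mem_end(struct gk20a *g, struct mem_desc *mem);

/* Old names simply forward to the new ones; arguments are unchanged. */
static inline u32 gk20a_mem_rd32(struct gk20a *g, struct mem_desc *mem, u32 w)
{
	return nvgpu_mem_rd32(g, mem, w);
}

static inline void gk20a_mem_wr32(struct gk20a *g, struct mem_desc *mem,
				  u32 w, u32 data)
{
	nvgpu_mem_wr32(g, mem, w, data);
}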
Diffstat (limited to 'drivers/gpu/nvgpu/gp10b')
-rw-r--r--  drivers/gpu/nvgpu/gp10b/fifo_gp10b.c  40
-rw-r--r--  drivers/gpu/nvgpu/gp10b/gr_gp10b.c    44
-rw-r--r--  drivers/gpu/nvgpu/gp10b/mm_gp10b.c    22
3 files changed, 53 insertions(+), 53 deletions(-)
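Most of the hunks below follow the same read-modify-write idiom against an instance block or context image. A standalone sketch of that idiom with the new names is shown here; the types, word index, and field helpers are mocks in the hw-header naming style, not the real hw_ram_gp10b.h definitions.

/* Standalone mock of the read-modify-write idiom seen in
 * gp10b_set_pdb_fault_replay_flags(); everything here is illustrative. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
struct gk20a { int unused; };
struct mem_desc { u32 words[64]; };	/* stand-in for a GPU-visible buffer */

static u32 nvgpu_mem_rd32(struct gk20a *g, struct mem_desc *mem, u32 w)
{
	(void)g;
	return mem->words[w];
}

static void nvgpu_mem_wr32(struct gk20a *g, struct mem_desc *mem, u32 w, u32 v)
{
	(void)g;
	mem->words[w] = v;
}

/* Mocked register-field helpers in the hw header naming style:
 * _w() = word index, _m() = field mask, _true_f() = field value. */
static u32 fault_replay_tex_w(void)      { return 4; }
static u32 fault_replay_tex_m(void)      { return 1u << 4; }
static u32 fault_replay_tex_true_f(void) { return 1u << 4; }

int main(void)
{
	struct gk20a g = { 0 };
	struct mem_desc inst = { { 0 } };
	u32 val;

	/* Read the word, clear the field, set it true, write it back. */
	val = nvgpu_mem_rd32(&g, &inst, fault_replay_tex_w());
	val &= ~fault_replay_tex_m();
	val |= fault_replay_tex_true_f();
	nvgpu_mem_wr32(&g, &inst, fault_replay_tex_w(), val);

	printf("word %u = 0x%08x\n", fault_replay_tex_w(),
	       nvgpu_mem_rd32(&g, &inst, fault_replay_tex_w()));
	return 0;
}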
diff --git a/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c b/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
index 6f1a0298..3787662b 100644
--- a/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
@@ -33,18 +33,18 @@ static void gp10b_set_pdb_fault_replay_flags(struct gk20a *g,
 
 	gk20a_dbg_fn("");
 
-	val = gk20a_mem_rd32(g, mem,
+	val = nvgpu_mem_rd32(g, mem,
 			ram_in_page_dir_base_fault_replay_tex_w());
 	val &= ~ram_in_page_dir_base_fault_replay_tex_m();
 	val |= ram_in_page_dir_base_fault_replay_tex_true_f();
-	gk20a_mem_wr32(g, mem,
+	nvgpu_mem_wr32(g, mem,
 			ram_in_page_dir_base_fault_replay_tex_w(), val);
 
-	val = gk20a_mem_rd32(g, mem,
+	val = nvgpu_mem_rd32(g, mem,
 			ram_in_page_dir_base_fault_replay_gcc_w());
 	val &= ~ram_in_page_dir_base_fault_replay_gcc_m();
 	val |= ram_in_page_dir_base_fault_replay_gcc_true_f();
-	gk20a_mem_wr32(g, mem,
+	nvgpu_mem_wr32(g, mem,
 			ram_in_page_dir_base_fault_replay_gcc_w(), val);
 
 	gk20a_dbg_fn("done");
@@ -64,14 +64,14 @@ int channel_gp10b_commit_userd(struct channel_gk20a *c)
 	gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx",
 		c->hw_chid, (u64)c->userd_iova);
 
-	gk20a_mem_wr32(g, &c->inst_block,
+	nvgpu_mem_wr32(g, &c->inst_block,
 		ram_in_ramfc_w() + ram_fc_userd_w(),
 		(g->mm.vidmem_is_vidmem ?
 		 pbdma_userd_target_sys_mem_ncoh_f() :
 		 pbdma_userd_target_vid_mem_f()) |
 		pbdma_userd_addr_f(addr_lo));
 
-	gk20a_mem_wr32(g, &c->inst_block,
+	nvgpu_mem_wr32(g, &c->inst_block,
 		ram_in_ramfc_w() + ram_fc_userd_hi_w(),
 		pbdma_userd_hi_addr_f(addr_hi));
 
@@ -87,25 +87,25 @@ static int channel_gp10b_setup_ramfc(struct channel_gk20a *c,
 
 	gk20a_dbg_fn("");
 
-	gk20a_memset(g, mem, 0, 0, ram_fc_size_val_v());
+	nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v());
 
-	gk20a_mem_wr32(g, mem, ram_fc_gp_base_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_gp_base_w(),
 		pbdma_gp_base_offset_f(
 		u64_lo32(gpfifo_base >> pbdma_gp_base_rsvd_s())));
 
-	gk20a_mem_wr32(g, mem, ram_fc_gp_base_hi_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_gp_base_hi_w(),
 		pbdma_gp_base_hi_offset_f(u64_hi32(gpfifo_base)) |
 		pbdma_gp_base_hi_limit2_f(ilog2(gpfifo_entries)));
 
-	gk20a_mem_wr32(g, mem, ram_fc_signature_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_signature_w(),
 		c->g->ops.fifo.get_pbdma_signature(c->g));
 
-	gk20a_mem_wr32(g, mem, ram_fc_formats_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_formats_w(),
 		pbdma_formats_gp_fermi0_f() |
 		pbdma_formats_pb_fermi1_f() |
 		pbdma_formats_mp_fermi0_f());
 
-	gk20a_mem_wr32(g, mem, ram_fc_pb_header_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_pb_header_w(),
 		pbdma_pb_header_priv_user_f() |
 		pbdma_pb_header_method_zero_f() |
 		pbdma_pb_header_subchannel_zero_f() |
@@ -113,17 +113,17 @@ static int channel_gp10b_setup_ramfc(struct channel_gk20a *c,
 		pbdma_pb_header_first_true_f() |
 		pbdma_pb_header_type_inc_f());
 
-	gk20a_mem_wr32(g, mem, ram_fc_subdevice_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_subdevice_w(),
 		pbdma_subdevice_id_f(1) |
 		pbdma_subdevice_status_active_f() |
 		pbdma_subdevice_channel_dma_enable_f());
 
-	gk20a_mem_wr32(g, mem, ram_fc_target_w(), pbdma_target_engine_sw_f());
+	nvgpu_mem_wr32(g, mem, ram_fc_target_w(), pbdma_target_engine_sw_f());
 
-	gk20a_mem_wr32(g, mem, ram_fc_acquire_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_acquire_w(),
 		g->ops.fifo.pbdma_acquire_val(acquire_timeout));
 
-	gk20a_mem_wr32(g, mem, ram_fc_runlist_timeslice_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_runlist_timeslice_w(),
 		pbdma_runlist_timeslice_timeout_128_f() |
 		pbdma_runlist_timeslice_timescale_3_f() |
 		pbdma_runlist_timeslice_enable_true_f());
@@ -132,11 +132,11 @@ static int channel_gp10b_setup_ramfc(struct channel_gk20a *c,
 	gp10b_set_pdb_fault_replay_flags(c->g, mem);
 
 
-	gk20a_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));
+	nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));
 
 	if (c->is_privileged_channel) {
 		/* Set privilege level for channel */
-		gk20a_mem_wr32(g, mem, ram_fc_config_w(),
+		nvgpu_mem_wr32(g, mem, ram_fc_config_w(),
 			pbdma_config_auth_level_privileged_f());
 
 		gk20a_fifo_setup_ramfc_for_privileged_channel(c);
@@ -158,7 +158,7 @@ static int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c)
 
 	gk20a_dbg_fn("");
 
-	v = gk20a_mem_rd32(c->g, &c->inst_block,
+	v = nvgpu_mem_rd32(c->g, &c->inst_block,
 			ram_fc_allowed_syncpoints_w());
 	old_syncpt = pbdma_allowed_syncpoints_0_index_v(v);
 	if (c->sync)
@@ -178,7 +178,7 @@ static int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c)
 
 		v |= pbdma_allowed_syncpoints_0_index_f(new_syncpt);
 
-		gk20a_mem_wr32(c->g, &c->inst_block,
+		nvgpu_mem_wr32(c->g, &c->inst_block,
 				ram_fc_allowed_syncpoints_w(), v);
 	}
 
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
index 95590e40..fc831e75 100644
--- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
@@ -1039,51 +1039,51 @@ static void dump_ctx_switch_stats(struct gk20a *g, struct vm_gk20a *vm,
 {
 	struct mem_desc *mem = &gr_ctx->mem;
 
-	if (gk20a_mem_begin(g, mem)) {
+	if (nvgpu_mem_begin(g, mem)) {
 		WARN_ON("Cannot map context");
 		return;
 	}
 	gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_magic_value_o : %x (expect %x)\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_magic_value_o()),
 		ctxsw_prog_main_image_magic_value_v_value_v());
 
 	gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi : %x\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_o()));
 
 	gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_ptr : %x\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_context_timestamp_buffer_ptr_o()));
 
 	gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_control : %x\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_context_timestamp_buffer_control_o()));
 
 	gk20a_err(dev_from_gk20a(g), "NUM_SAVE_OPERATIONS : %d\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_num_save_ops_o()));
 	gk20a_err(dev_from_gk20a(g), "WFI_SAVE_OPERATIONS : %d\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_num_wfi_save_ops_o()));
 	gk20a_err(dev_from_gk20a(g), "CTA_SAVE_OPERATIONS : %d\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_num_cta_save_ops_o()));
 	gk20a_err(dev_from_gk20a(g), "GFXP_SAVE_OPERATIONS : %d\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_num_gfxp_save_ops_o()));
 	gk20a_err(dev_from_gk20a(g), "CILP_SAVE_OPERATIONS : %d\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_num_cilp_save_ops_o()));
 	gk20a_err(dev_from_gk20a(g),
 		"image gfx preemption option (GFXP is 1) %x\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_graphics_preemption_options_o()));
 	gk20a_err(dev_from_gk20a(g),
 		"image compute preemption option (CTA is 1) %x\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_compute_preemption_options_o()));
-	gk20a_mem_end(g, mem);
+	nvgpu_mem_end(g, mem);
 }
 
 static void gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
@@ -1123,21 +1123,21 @@ static void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
 
 	if (gr_ctx->graphics_preempt_mode == NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP) {
 		gk20a_dbg_info("GfxP: %x", gfxp_preempt_option);
-		gk20a_mem_wr(g, mem,
+		nvgpu_mem_wr(g, mem,
 			ctxsw_prog_main_image_graphics_preemption_options_o(),
 			gfxp_preempt_option);
 	}
 
 	if (gr_ctx->compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CILP) {
 		gk20a_dbg_info("CILP: %x", cilp_preempt_option);
-		gk20a_mem_wr(g, mem,
+		nvgpu_mem_wr(g, mem,
 			ctxsw_prog_main_image_compute_preemption_options_o(),
 			cilp_preempt_option);
 	}
 
 	if (gr_ctx->compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CTA) {
 		gk20a_dbg_info("CTA: %x", cta_preempt_option);
-		gk20a_mem_wr(g, mem,
+		nvgpu_mem_wr(g, mem,
 			ctxsw_prog_main_image_compute_preemption_options_o(),
 			cta_preempt_option);
 	}
@@ -1147,7 +1147,7 @@ static void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
 		u32 size;
 		u32 cbes_reserve;
 
-		gk20a_mem_wr(g, mem,
+		nvgpu_mem_wr(g, mem,
 			ctxsw_prog_main_image_full_preemption_ptr_o(),
 			gr_ctx->t18x.preempt_ctxsw_buffer.gpu_va >> 8);
 
@@ -2077,7 +2077,7 @@ static int gr_gp10b_set_boosted_ctx(struct channel_gk20a *ch,
 
 	gr_ctx->boosted_ctx = boost;
 
-	if (gk20a_mem_begin(g, mem))
+	if (nvgpu_mem_begin(g, mem))
 		return -ENOMEM;
 
 	err = gk20a_disable_channel_tsg(g, ch);
@@ -2096,7 +2096,7 @@ static int gr_gp10b_set_boosted_ctx(struct channel_gk20a *ch,
 enable_ch:
 	gk20a_enable_channel_tsg(g, ch);
 unmap_ctx:
-	gk20a_mem_end(g, mem);
+	nvgpu_mem_end(g, mem);
 
 	return err;
 }
@@ -2107,7 +2107,7 @@ static void gr_gp10b_update_boosted_ctx(struct gk20a *g, struct mem_desc *mem,
 
 	v = ctxsw_prog_main_image_pmu_options_boost_clock_frequencies_f(
 		gr_ctx->boosted_ctx);
-	gk20a_mem_wr(g, mem, ctxsw_prog_main_image_pmu_options_o(), v);
+	nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_pmu_options_o(), v);
 }
 
 static int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
@@ -2164,7 +2164,7 @@ static int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
 		}
 	}
 
-	if (gk20a_mem_begin(g, mem))
+	if (nvgpu_mem_begin(g, mem))
 		return -ENOMEM;
 
 	err = gk20a_disable_channel_tsg(g, ch);
@@ -2191,7 +2191,7 @@ static int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
 enable_ch:
 	gk20a_enable_channel_tsg(g, ch);
 unmap_ctx:
-	gk20a_mem_end(g, mem);
+	nvgpu_mem_end(g, mem);
 
 	return err;
 }
diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
index a5322bad..8c6340f0 100644
--- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
@@ -107,7 +107,7 @@ static int gb10b_init_bar2_mm_hw_setup(struct gk20a *g)
 	gk20a_dbg_info("bar2 inst block ptr: 0x%08x", (u32)inst_pa);
 
 	gk20a_writel(g, bus_bar2_block_r(),
-		     gk20a_aperture_mask(g, inst_block,
+		     nvgpu_aperture_mask(g, inst_block,
 		     bus_bar2_block_target_sys_mem_ncoh_f(),
 		     bus_bar2_block_target_vid_mem_f()) |
 		     bus_bar2_block_mode_virtual_f() |
@@ -162,7 +162,7 @@ static int update_gmmu_pde3_locked(struct vm_gk20a *vm,
 			   u32 kind_v, u64 *ctag,
 			   bool cacheable, bool unmapped_pte,
 			   int rw_flag, bool sparse, bool priv,
-			   enum gk20a_aperture aperture)
+			   enum nvgpu_aperture aperture)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	u64 pte_addr = 0;
@@ -174,7 +174,7 @@ static int update_gmmu_pde3_locked(struct vm_gk20a *vm,
 
 	pte_addr = gk20a_pde_addr(g, pte) >> gmmu_new_pde_address_shift_v();
 
-	pde_v[0] |= gk20a_aperture_mask(g, &pte->mem,
+	pde_v[0] |= nvgpu_aperture_mask(g, &pte->mem,
 		gmmu_new_pde_aperture_sys_mem_ncoh_f(),
 		gmmu_new_pde_aperture_video_memory_f());
 	pde_v[0] |= gmmu_new_pde_address_sys_f(u64_lo32(pte_addr));
@@ -205,7 +205,7 @@ static int update_gmmu_pde0_locked(struct vm_gk20a *vm,
 			   u32 kind_v, u64 *ctag,
 			   bool cacheable, bool unmapped_pte,
 			   int rw_flag, bool sparse, bool priv,
-			   enum gk20a_aperture aperture)
+			   enum nvgpu_aperture aperture)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	bool small_valid, big_valid;
@@ -230,7 +230,7 @@ static int update_gmmu_pde0_locked(struct vm_gk20a *vm,
 
 	if (small_valid) {
 		pde_v[2] |= gmmu_new_dual_pde_address_small_sys_f(pte_addr_small);
-		pde_v[2] |= gk20a_aperture_mask(g, &entry->mem,
+		pde_v[2] |= nvgpu_aperture_mask(g, &entry->mem,
 			gmmu_new_dual_pde_aperture_small_sys_mem_ncoh_f(),
 			gmmu_new_dual_pde_aperture_small_video_memory_f());
 		pde_v[2] |= gmmu_new_dual_pde_vol_small_true_f();
@@ -240,7 +240,7 @@ static int update_gmmu_pde0_locked(struct vm_gk20a *vm,
 	if (big_valid) {
 		pde_v[0] |= gmmu_new_dual_pde_address_big_sys_f(pte_addr_big);
 		pde_v[0] |= gmmu_new_dual_pde_vol_big_true_f();
-		pde_v[0] |= gk20a_aperture_mask(g, &entry->mem,
+		pde_v[0] |= nvgpu_aperture_mask(g, &entry->mem,
 			gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f(),
 			gmmu_new_dual_pde_aperture_big_video_memory_f());
 		pde_v[1] |= pte_addr_big >> 28;
@@ -268,7 +268,7 @@ static int update_gmmu_pte_locked(struct vm_gk20a *vm,
 			   u32 kind_v, u64 *ctag,
 			   bool cacheable, bool unmapped_pte,
 			   int rw_flag, bool sparse, bool priv,
-			   enum gk20a_aperture aperture)
+			   enum nvgpu_aperture aperture)
 {
 	struct gk20a *g = vm->mm->g;
 	u32 page_size = vm->gmmu_page_sizes[gmmu_pgsz_idx];
@@ -284,7 +284,7 @@ static int update_gmmu_pte_locked(struct vm_gk20a *vm,
 		u32 pte_addr = aperture == APERTURE_SYSMEM ?
 			       gmmu_new_pte_address_sys_f(iova_v) :
 			       gmmu_new_pte_address_vid_f(iova_v);
-		u32 pte_tgt = __gk20a_aperture_mask(g, aperture,
+		u32 pte_tgt = __nvgpu_aperture_mask(g, aperture,
 			       gmmu_new_pte_aperture_sys_mem_ncoh_f(),
 			       gmmu_new_pte_aperture_video_memory_f());
 
@@ -384,15 +384,15 @@ static void gp10b_mm_init_pdb(struct gk20a *g, struct mem_desc *inst_block,
 
 	gk20a_dbg_info("pde pa=0x%llx", pdb_addr);
 
-	gk20a_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(),
-		gk20a_aperture_mask(g, &vm->pdb.mem,
+	nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(),
+		nvgpu_aperture_mask(g, &vm->pdb.mem,
 			ram_in_page_dir_base_target_sys_mem_ncoh_f(),
 			ram_in_page_dir_base_target_vid_mem_f()) |
 		ram_in_page_dir_base_vol_true_f() |
 		ram_in_page_dir_base_lo_f(pdb_addr_lo) |
 		1 << 10);
 
-	gk20a_mem_wr32(g, inst_block, ram_in_page_dir_base_hi_w(),
+	nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_hi_w(),
 		ram_in_page_dir_base_hi_f(pdb_addr_hi));
 }
 