author		Alex Waterman <alexw@nvidia.com>	2017-03-15 19:42:12 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-04-06 21:14:48 -0400
commit		b69020bff5dfa69cad926c9374cdbe9a62509ffd (patch)
tree		222f6b6bc23561a38004a257cbac401e431ff3be /drivers/gpu/nvgpu/gk20a
parent		fa4ecf5730a75269e85cc41c2ad2ee61307e72a9 (diff)
gpu: nvgpu: Rename gk20a_mem_* functions
Rename the functions used for mem_desc access to nvgpu_mem_*.

JIRA NVGPU-12

Change-Id: Ibfdc1112d43f0a125e4487c250e3f977ffd2cd75
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1323325
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
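The rename is mechanical: every mem_desc accessor and helper drops its gk20a_ prefix in favour of nvgpu_ (gk20a_mem_begin/end, gk20a_mem_rd*/gk20a_mem_wr*, gk20a_memset, gk20a_aperture_mask, gk20a_aperture_str, and the enum gk20a_aperture type). A minimal before/after sketch of a typical call site is shown below; the variables and values are illustrative only and are not taken from this change:

	/* Hypothetical call site: "g" is a struct gk20a * and "mem" is a
	 * struct mem_desc * that has already been allocated. */
	u32 val;

	/* Before this change: */
	if (gk20a_mem_begin(g, mem))		/* map the buffer for CPU access */
		return -ENOMEM;
	gk20a_mem_wr32(g, mem, 0, 0x2001001c);	/* write one u32 at word offset 0 */
	val = gk20a_mem_rd32(g, mem, 0);	/* read it back */
	gk20a_mem_end(g, mem);			/* unmap */

	/* After this change (same arguments, new prefix): */
	if (nvgpu_mem_begin(g, mem))
		return -ENOMEM;
	nvgpu_mem_wr32(g, mem, 0, 0x2001001c);
	val = nvgpu_mem_rd32(g, mem, 0);
	nvgpu_mem_end(g, mem);

The signatures themselves are untouched, so call sites only change prefix; the diffstat below is correspondingly near-symmetric (205 insertions against 204 deletions), and the only pure addition visible in this diff is the new #include <nvgpu/mem_desc.h> in mm_gk20a.c.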
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/bus_gk20a.c	2
-rw-r--r--	drivers/gpu/nvgpu/gk20a/channel_gk20a.c	8
-rw-r--r--	drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c	54
-rw-r--r--	drivers/gpu/nvgpu/gk20a/debug_gk20a.c	2
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fb_gk20a.c	2
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c	12
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.c	58
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gk20a.h	2
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.c	190
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mm_gk20a.c	57
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mm_gk20a.h	12
-rw-r--r--	drivers/gpu/nvgpu/gk20a/pmu_gk20a.c	8
-rw-r--r--	drivers/gpu/nvgpu/gk20a/pramin_gk20a.c	2
13 files changed, 205 insertions, 204 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/bus_gk20a.c b/drivers/gpu/nvgpu/gk20a/bus_gk20a.c
index fda1f80e..d161a29c 100644
--- a/drivers/gpu/nvgpu/gk20a/bus_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/bus_gk20a.c
@@ -138,7 +138,7 @@ static int gk20a_bus_bar1_bind(struct gk20a *g, struct mem_desc *bar1_inst)
 	gk20a_dbg_info("bar1 inst block ptr: 0x%08x", ptr_v);

 	gk20a_writel(g, bus_bar1_block_r(),
-		gk20a_aperture_mask(g, bar1_inst,
+		nvgpu_aperture_mask(g, bar1_inst,
 			bus_bar1_block_target_sys_mem_ncoh_f(),
 			bus_bar1_block_target_vid_mem_f()) |
 		bus_bar1_block_mode_virtual_f() |
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index b7306369..e13a903f 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -2124,7 +2124,7 @@ static void gk20a_submit_append_priv_cmdbuf(struct channel_gk20a *c,
 		pbdma_gp_entry1_length_f(cmd->size)
 	};

-	gk20a_mem_wr_n(g, gpfifo_mem, c->gpfifo.put * sizeof(x),
+	nvgpu_mem_wr_n(g, gpfifo_mem, c->gpfifo.put * sizeof(x),
 			&x, sizeof(x));

 	if (cmd->mem->aperture == APERTURE_SYSMEM)
@@ -2207,10 +2207,10 @@ static int gk20a_submit_append_gpfifo(struct channel_gk20a *c,
 		int length1 = len - length0;
 		void *src2 = (u8 *)cpu_src + length0;

-		gk20a_mem_wr_n(c->g, gpfifo_mem, start, cpu_src, length0);
-		gk20a_mem_wr_n(c->g, gpfifo_mem, 0, src2, length1);
+		nvgpu_mem_wr_n(c->g, gpfifo_mem, start, cpu_src, length0);
+		nvgpu_mem_wr_n(c->g, gpfifo_mem, 0, src2, length1);
 	} else {
-		gk20a_mem_wr_n(c->g, gpfifo_mem, start, cpu_src, len);
+		nvgpu_mem_wr_n(c->g, gpfifo_mem, start, cpu_src, len);

 	}

diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
index 8baf60dd..d9dfb133 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
@@ -46,13 +46,13 @@ static void add_wait_cmd(struct gk20a *g, struct priv_cmd_entry *cmd, u32 off,
 {
 	off = cmd->off + off;
 	/* syncpoint_a */
-	gk20a_mem_wr32(g, cmd->mem, off++, 0x2001001C);
+	nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001C);
 	/* payload */
-	gk20a_mem_wr32(g, cmd->mem, off++, thresh);
+	nvgpu_mem_wr32(g, cmd->mem, off++, thresh);
 	/* syncpoint_b */
-	gk20a_mem_wr32(g, cmd->mem, off++, 0x2001001D);
+	nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001D);
 	/* syncpt_id, switch_en, wait */
-	gk20a_mem_wr32(g, cmd->mem, off++, (id << 8) | 0x10);
+	nvgpu_mem_wr32(g, cmd->mem, off++, (id << 8) | 0x10);
 }

 static int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s,
@@ -151,7 +151,7 @@ static int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
 		if (nvhost_syncpt_is_expired_ext(sp->host1x_pdev,
 				wait_id, wait_value)) {
 			/* each wait_cmd is 4 u32s */
-			gk20a_memset(c->g, wait_cmd->mem,
+			nvgpu_memset(c->g, wait_cmd->mem,
 					(wait_cmd->off + i * 4) * sizeof(u32),
 					0, 4 * sizeof(u32));
 		} else
@@ -212,22 +212,22 @@ static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,

 	if (wfi_cmd) {
 		/* wfi */
-		gk20a_mem_wr32(c->g, incr_cmd->mem, off++, 0x2001001E);
+		nvgpu_mem_wr32(c->g, incr_cmd->mem, off++, 0x2001001E);
 		/* handle, ignored */
-		gk20a_mem_wr32(c->g, incr_cmd->mem, off++, 0x00000000);
+		nvgpu_mem_wr32(c->g, incr_cmd->mem, off++, 0x00000000);
 	}
 	/* syncpoint_a */
-	gk20a_mem_wr32(c->g, incr_cmd->mem, off++, 0x2001001C);
+	nvgpu_mem_wr32(c->g, incr_cmd->mem, off++, 0x2001001C);
 	/* payload, ignored */
-	gk20a_mem_wr32(c->g, incr_cmd->mem, off++, 0);
+	nvgpu_mem_wr32(c->g, incr_cmd->mem, off++, 0);
 	/* syncpoint_b */
-	gk20a_mem_wr32(c->g, incr_cmd->mem, off++, 0x2001001D);
+	nvgpu_mem_wr32(c->g, incr_cmd->mem, off++, 0x2001001D);
 	/* syncpt_id, incr */
-	gk20a_mem_wr32(c->g, incr_cmd->mem, off++, (sp->id << 8) | 0x1);
+	nvgpu_mem_wr32(c->g, incr_cmd->mem, off++, (sp->id << 8) | 0x1);
 	/* syncpoint_b */
-	gk20a_mem_wr32(c->g, incr_cmd->mem, off++, 0x2001001D);
+	nvgpu_mem_wr32(c->g, incr_cmd->mem, off++, 0x2001001D);
 	/* syncpt_id, incr */
-	gk20a_mem_wr32(c->g, incr_cmd->mem, off++, (sp->id << 8) | 0x1);
+	nvgpu_mem_wr32(c->g, incr_cmd->mem, off++, (sp->id << 8) | 0x1);

 	WARN_ON(off - incr_cmd->off != incr_cmd_size);

@@ -531,39 +531,39 @@ static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
 	nvgpu_semaphore_incr(s);

 	/* semaphore_a */
-	gk20a_mem_wr32(g, cmd->mem, off++, 0x20010004);
+	nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010004);
 	/* offset_upper */
-	gk20a_mem_wr32(g, cmd->mem, off++, (va >> 32) & 0xff);
+	nvgpu_mem_wr32(g, cmd->mem, off++, (va >> 32) & 0xff);
 	/* semaphore_b */
-	gk20a_mem_wr32(g, cmd->mem, off++, 0x20010005);
+	nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010005);
 	/* offset */
-	gk20a_mem_wr32(g, cmd->mem, off++, va & 0xffffffff);
+	nvgpu_mem_wr32(g, cmd->mem, off++, va & 0xffffffff);

 	if (acquire) {
 		/* semaphore_c */
-		gk20a_mem_wr32(g, cmd->mem, off++, 0x20010006);
+		nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010006);
 		/* payload */
-		gk20a_mem_wr32(g, cmd->mem, off++,
+		nvgpu_mem_wr32(g, cmd->mem, off++,
 			       nvgpu_semaphore_get_value(s));
 		/* semaphore_d */
-		gk20a_mem_wr32(g, cmd->mem, off++, 0x20010007);
+		nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010007);
 		/* operation: acq_geq, switch_en */
-		gk20a_mem_wr32(g, cmd->mem, off++, 0x4 | (0x1 << 12));
+		nvgpu_mem_wr32(g, cmd->mem, off++, 0x4 | (0x1 << 12));
 	} else {
 		/* semaphore_c */
-		gk20a_mem_wr32(g, cmd->mem, off++, 0x20010006);
+		nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010006);
 		/* payload */
-		gk20a_mem_wr32(g, cmd->mem, off++,
+		nvgpu_mem_wr32(g, cmd->mem, off++,
 			       nvgpu_semaphore_get_value(s));
 		/* semaphore_d */
-		gk20a_mem_wr32(g, cmd->mem, off++, 0x20010007);
+		nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010007);
 		/* operation: release, wfi */
-		gk20a_mem_wr32(g, cmd->mem, off++,
+		nvgpu_mem_wr32(g, cmd->mem, off++,
 			       0x2 | ((wfi ? 0x0 : 0x1) << 20));
 		/* non_stall_int */
-		gk20a_mem_wr32(g, cmd->mem, off++, 0x20010008);
+		nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010008);
 		/* ignored */
-		gk20a_mem_wr32(g, cmd->mem, off++, 0);
+		nvgpu_mem_wr32(g, cmd->mem, off++, 0);
 	}

 	if (acquire)
diff --git a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
index 7e7c9cb8..5724be72 100644
--- a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
@@ -87,7 +87,7 @@ static void gk20a_debug_dump_all_channel_status_ramfc(struct gk20a *g,

 		ch_state[chid]->pid = ch->pid;
 		ch_state[chid]->refs = atomic_read(&ch->ref_count);
-		gk20a_mem_rd_n(g, &ch->inst_block, 0,
+		nvgpu_mem_rd_n(g, &ch->inst_block, 0,
 				&ch_state[chid]->inst_block[0],
 				ram_in_alloc_size_v());
 		gk20a_channel_put(ch);
diff --git a/drivers/gpu/nvgpu/gk20a/fb_gk20a.c b/drivers/gpu/nvgpu/gk20a/fb_gk20a.c
index 44f0ac4c..2e0809ee 100644
--- a/drivers/gpu/nvgpu/gk20a/fb_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fb_gk20a.c
@@ -135,7 +135,7 @@ void gk20a_fb_tlb_invalidate(struct gk20a *g, struct mem_desc *pdb)

 	gk20a_writel(g, fb_mmu_invalidate_pdb_r(),
 		fb_mmu_invalidate_pdb_addr_f(addr_lo) |
-		gk20a_aperture_mask(g, pdb,
+		nvgpu_aperture_mask(g, pdb,
 			fb_mmu_invalidate_pdb_aperture_sys_mem_f(),
 			fb_mmu_invalidate_pdb_aperture_vid_mem_f()));

diff --git a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
index 4fa71797..b4e3bad1 100644
--- a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
@@ -636,11 +636,11 @@ static int gk20a_fecs_trace_bind_channel(struct gk20a *g,
 	pa = gk20a_mm_inst_block_addr(g, &trace->trace_buf);
 	if (!pa)
 		return -ENOMEM;
-	aperture = gk20a_aperture_mask(g, &trace->trace_buf,
+	aperture = nvgpu_aperture_mask(g, &trace->trace_buf,
 			ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_target_sys_mem_noncoherent_f(),
 			ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_target_vid_mem_f());

-	if (gk20a_mem_begin(g, mem))
+	if (nvgpu_mem_begin(g, mem))
 		return -ENOMEM;

 	lo = u64_lo32(pa);
@@ -649,19 +649,19 @@ static int gk20a_fecs_trace_bind_channel(struct gk20a *g,
 	gk20a_dbg(gpu_dbg_ctxsw, "addr_hi=%x addr_lo=%x count=%d", hi,
 		lo, GK20A_FECS_TRACE_NUM_RECORDS);

-	gk20a_mem_wr(g, mem,
+	nvgpu_mem_wr(g, mem,
 		ctxsw_prog_main_image_context_timestamp_buffer_ptr_o(),
 		lo);
-	gk20a_mem_wr(g, mem,
+	nvgpu_mem_wr(g, mem,
 		ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_o(),
 		ctxsw_prog_main_image_context_timestamp_buffer_ptr_v_f(hi) |
 		aperture);
-	gk20a_mem_wr(g, mem,
+	nvgpu_mem_wr(g, mem,
 		ctxsw_prog_main_image_context_timestamp_buffer_control_o(),
 		ctxsw_prog_main_image_context_timestamp_buffer_control_num_records_f(
 			GK20A_FECS_TRACE_NUM_RECORDS));

-	gk20a_mem_end(g, mem);
+	nvgpu_mem_end(g, mem);

 	/* pid (process identifier) in user space, corresponds to tgid (thread
 	 * group id) in kernel space.
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index b2a6b1a0..b8b0c9b0 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -3141,7 +3141,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 	if (count != 0) {
 		gk20a_writel(g, fifo_runlist_base_r(),
 			fifo_runlist_base_ptr_f(u64_lo32(runlist_iova >> 12)) |
-			gk20a_aperture_mask(g, &runlist->mem[new_buf],
+			nvgpu_aperture_mask(g, &runlist->mem[new_buf],
 				fifo_runlist_base_target_sys_mem_ncoh_f(),
 				fifo_runlist_base_target_vid_mem_f()));
 	}
@@ -3901,7 +3901,7 @@ static void gk20a_fifo_channel_bind(struct channel_gk20a *c)

 	gk20a_writel(g, ccsr_channel_inst_r(c->hw_chid),
 		ccsr_channel_inst_ptr_f(inst_ptr) |
-		gk20a_aperture_mask(g, &c->inst_block,
+		nvgpu_aperture_mask(g, &c->inst_block,
 			ccsr_channel_inst_target_sys_mem_ncoh_f(),
 			ccsr_channel_inst_target_vid_mem_f()) |
 		ccsr_channel_inst_bind_true_f());
@@ -3943,14 +3943,14 @@ static int gk20a_fifo_commit_userd(struct channel_gk20a *c)
 	gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx",
 		c->hw_chid, (u64)c->userd_iova);

-	gk20a_mem_wr32(g, &c->inst_block,
+	nvgpu_mem_wr32(g, &c->inst_block,
 		ram_in_ramfc_w() + ram_fc_userd_w(),
-		gk20a_aperture_mask(g, &g->fifo.userd,
+		nvgpu_aperture_mask(g, &g->fifo.userd,
 			pbdma_userd_target_sys_mem_ncoh_f(),
 			pbdma_userd_target_vid_mem_f()) |
 		pbdma_userd_addr_f(addr_lo));

-	gk20a_mem_wr32(g, &c->inst_block,
+	nvgpu_mem_wr32(g, &c->inst_block,
 		ram_in_ramfc_w() + ram_fc_userd_hi_w(),
 		pbdma_userd_hi_addr_f(addr_hi));

@@ -3967,25 +3967,25 @@ int gk20a_fifo_setup_ramfc(struct channel_gk20a *c,

 	gk20a_dbg_fn("");

-	gk20a_memset(g, mem, 0, 0, ram_fc_size_val_v());
+	nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v());

-	gk20a_mem_wr32(g, mem, ram_fc_gp_base_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_gp_base_w(),
 		pbdma_gp_base_offset_f(
 		u64_lo32(gpfifo_base >> pbdma_gp_base_rsvd_s())));

-	gk20a_mem_wr32(g, mem, ram_fc_gp_base_hi_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_gp_base_hi_w(),
 		pbdma_gp_base_hi_offset_f(u64_hi32(gpfifo_base)) |
 		pbdma_gp_base_hi_limit2_f(ilog2(gpfifo_entries)));

-	gk20a_mem_wr32(g, mem, ram_fc_signature_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_signature_w(),
 		c->g->ops.fifo.get_pbdma_signature(c->g));

-	gk20a_mem_wr32(g, mem, ram_fc_formats_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_formats_w(),
 		pbdma_formats_gp_fermi0_f() |
 		pbdma_formats_pb_fermi1_f() |
 		pbdma_formats_mp_fermi0_f());

-	gk20a_mem_wr32(g, mem, ram_fc_pb_header_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_pb_header_w(),
 		pbdma_pb_header_priv_user_f() |
 		pbdma_pb_header_method_zero_f() |
 		pbdma_pb_header_subchannel_zero_f() |
@@ -3993,27 +3993,27 @@ int gk20a_fifo_setup_ramfc(struct channel_gk20a *c,
 		pbdma_pb_header_first_true_f() |
 		pbdma_pb_header_type_inc_f());

-	gk20a_mem_wr32(g, mem, ram_fc_subdevice_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_subdevice_w(),
 		pbdma_subdevice_id_f(1) |
 		pbdma_subdevice_status_active_f() |
 		pbdma_subdevice_channel_dma_enable_f());

-	gk20a_mem_wr32(g, mem, ram_fc_target_w(), pbdma_target_engine_sw_f());
+	nvgpu_mem_wr32(g, mem, ram_fc_target_w(), pbdma_target_engine_sw_f());

-	gk20a_mem_wr32(g, mem, ram_fc_acquire_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_acquire_w(),
 		g->ops.fifo.pbdma_acquire_val(timeout));

-	gk20a_mem_wr32(g, mem, ram_fc_runlist_timeslice_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_runlist_timeslice_w(),
 		fifo_runlist_timeslice_timeout_128_f() |
 		fifo_runlist_timeslice_timescale_3_f() |
 		fifo_runlist_timeslice_enable_true_f());

-	gk20a_mem_wr32(g, mem, ram_fc_pb_timeslice_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_pb_timeslice_w(),
 		fifo_pb_timeslice_timeout_16_f() |
 		fifo_pb_timeslice_timescale_0_f() |
 		fifo_pb_timeslice_enable_true_f());

-	gk20a_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));
+	nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));

 	if (c->is_privileged_channel)
 		gk20a_fifo_setup_ramfc_for_privileged_channel(c);
@@ -4035,7 +4035,7 @@ static int channel_gk20a_set_schedule_params(struct channel_gk20a *c)
 	WARN_ON(c->g->ops.fifo.preempt_channel(c->g, c->hw_chid));

 	/* set new timeslice */
-	gk20a_mem_wr32(c->g, &c->inst_block, ram_fc_runlist_timeslice_w(),
+	nvgpu_mem_wr32(c->g, &c->inst_block, ram_fc_runlist_timeslice_w(),
 		value | (shift << 12) |
 		fifo_runlist_timeslice_enable_true_f());

@@ -4102,7 +4102,7 @@ void gk20a_fifo_setup_ramfc_for_privileged_channel(struct channel_gk20a *c)
 	gk20a_dbg_info("channel %d : set ramfc privileged_channel", c->hw_chid);

 	/* Enable HCE priv mode for phys mode transfer */
-	gk20a_mem_wr32(g, mem, ram_fc_hce_ctrl_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_hce_ctrl_w(),
 		pbdma_hce_ctrl_hce_priv_mode_yes_f());
 }

@@ -4114,16 +4114,16 @@ int gk20a_fifo_setup_userd(struct channel_gk20a *c)

 	gk20a_dbg_fn("");

-	gk20a_mem_wr32(g, mem, offset + ram_userd_put_w(), 0);
-	gk20a_mem_wr32(g, mem, offset + ram_userd_get_w(), 0);
-	gk20a_mem_wr32(g, mem, offset + ram_userd_ref_w(), 0);
-	gk20a_mem_wr32(g, mem, offset + ram_userd_put_hi_w(), 0);
-	gk20a_mem_wr32(g, mem, offset + ram_userd_ref_threshold_w(), 0);
-	gk20a_mem_wr32(g, mem, offset + ram_userd_gp_top_level_get_w(), 0);
-	gk20a_mem_wr32(g, mem, offset + ram_userd_gp_top_level_get_hi_w(), 0);
-	gk20a_mem_wr32(g, mem, offset + ram_userd_get_hi_w(), 0);
-	gk20a_mem_wr32(g, mem, offset + ram_userd_gp_get_w(), 0);
-	gk20a_mem_wr32(g, mem, offset + ram_userd_gp_put_w(), 0);
+	nvgpu_mem_wr32(g, mem, offset + ram_userd_put_w(), 0);
+	nvgpu_mem_wr32(g, mem, offset + ram_userd_get_w(), 0);
+	nvgpu_mem_wr32(g, mem, offset + ram_userd_ref_w(), 0);
+	nvgpu_mem_wr32(g, mem, offset + ram_userd_put_hi_w(), 0);
+	nvgpu_mem_wr32(g, mem, offset + ram_userd_ref_threshold_w(), 0);
+	nvgpu_mem_wr32(g, mem, offset + ram_userd_gp_top_level_get_w(), 0);
+	nvgpu_mem_wr32(g, mem, offset + ram_userd_gp_top_level_get_hi_w(), 0);
+	nvgpu_mem_wr32(g, mem, offset + ram_userd_get_hi_w(), 0);
+	nvgpu_mem_wr32(g, mem, offset + ram_userd_gp_get_w(), 0);
+	nvgpu_mem_wr32(g, mem, offset + ram_userd_gp_put_w(), 0);

 	return 0;
 }
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 2a9f8a06..db7b3c5d 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -617,7 +617,7 @@ struct gpu_ops {
 			bool sparse,
 			bool priv,
 			struct vm_gk20a_mapping_batch *batch,
-			enum gk20a_aperture aperture);
+			enum nvgpu_aperture aperture);
 		void (*gmmu_unmap)(struct vm_gk20a *vm,
 			u64 vaddr,
 			u64 size,
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 3e9a388b..360b8c97 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -110,13 +110,13 @@ int gr_gk20a_get_ctx_id(struct gk20a *g,
 	   Flush and invalidate before cpu update. */
 	g->ops.mm.l2_flush(g, true);

-	if (gk20a_mem_begin(g, &ch_ctx->gr_ctx->mem))
+	if (nvgpu_mem_begin(g, &ch_ctx->gr_ctx->mem))
 		return -ENOMEM;

-	*ctx_id = gk20a_mem_rd(g, &ch_ctx->gr_ctx->mem,
+	*ctx_id = nvgpu_mem_rd(g, &ch_ctx->gr_ctx->mem,
 			ctxsw_prog_main_image_context_id_o());

-	gk20a_mem_end(g, &ch_ctx->gr_ctx->mem);
+	nvgpu_mem_end(g, &ch_ctx->gr_ctx->mem);

 	return 0;
 }
@@ -649,11 +649,11 @@ int gr_gk20a_commit_inst(struct channel_gk20a *c, u64 gpu_va)
 	addr_lo = u64_lo32(gpu_va) >> 12;
 	addr_hi = u64_hi32(gpu_va);

-	gk20a_mem_wr32(c->g, &c->inst_block, ram_in_gr_wfi_target_w(),
+	nvgpu_mem_wr32(c->g, &c->inst_block, ram_in_gr_wfi_target_w(),
 		 ram_in_gr_cs_wfi_f() | ram_in_gr_wfi_mode_virtual_f() |
 		 ram_in_gr_wfi_ptr_lo_f(addr_lo));

-	gk20a_mem_wr32(c->g, &c->inst_block, ram_in_gr_wfi_ptr_hi_w(),
+	nvgpu_mem_wr32(c->g, &c->inst_block, ram_in_gr_wfi_ptr_hi_w(),
 		 ram_in_gr_wfi_ptr_hi_f(addr_hi));

 	return 0;
@@ -670,16 +670,16 @@ int gr_gk20a_commit_inst(struct channel_gk20a *c, u64 gpu_va)
 int gr_gk20a_ctx_patch_write_begin(struct gk20a *g,
 					  struct channel_ctx_gk20a *ch_ctx)
 {
-	return gk20a_mem_begin(g, &ch_ctx->patch_ctx.mem);
+	return nvgpu_mem_begin(g, &ch_ctx->patch_ctx.mem);
 }

 void gr_gk20a_ctx_patch_write_end(struct gk20a *g,
 					 struct channel_ctx_gk20a *ch_ctx)
 {
-	gk20a_mem_end(g, &ch_ctx->patch_ctx.mem);
+	nvgpu_mem_end(g, &ch_ctx->patch_ctx.mem);
 	/* Write context count to context image if it is mapped */
 	if (ch_ctx->gr_ctx->mem.cpu_va) {
-		gk20a_mem_wr(g, &ch_ctx->gr_ctx->mem,
+		nvgpu_mem_wr(g, &ch_ctx->gr_ctx->mem,
 			     ctxsw_prog_main_image_patch_count_o(),
 			     ch_ctx->patch_ctx.data_count);
 	}
@@ -691,8 +691,8 @@ void gr_gk20a_ctx_patch_write(struct gk20a *g,
 {
 	if (patch) {
 		u32 patch_slot = ch_ctx->patch_ctx.data_count * 2;
-		gk20a_mem_wr32(g, &ch_ctx->patch_ctx.mem, patch_slot, addr);
-		gk20a_mem_wr32(g, &ch_ctx->patch_ctx.mem, patch_slot + 1, data);
+		nvgpu_mem_wr32(g, &ch_ctx->patch_ctx.mem, patch_slot, addr);
+		nvgpu_mem_wr32(g, &ch_ctx->patch_ctx.mem, patch_slot + 1, data);
 		ch_ctx->patch_ctx.data_count++;
 	} else {
 		gk20a_writel(g, addr, data);
@@ -703,7 +703,7 @@ static u32 fecs_current_ctx_data(struct gk20a *g, struct mem_desc *inst_block)
 {
 	u32 ptr = u64_lo32(gk20a_mm_inst_block_addr(g, inst_block)
 			>> ram_in_base_shift_v());
-	u32 aperture = gk20a_aperture_mask(g, inst_block,
+	u32 aperture = nvgpu_aperture_mask(g, inst_block,
 			gr_fecs_current_ctx_target_sys_mem_ncoh_f(),
 			gr_fecs_current_ctx_target_vid_mem_f());

@@ -745,7 +745,7 @@ void gr_gk20a_write_zcull_ptr(struct gk20a *g,
 {
 	u32 va = u64_lo32(gpu_va >> 8);

-	gk20a_mem_wr(g, mem,
+	nvgpu_mem_wr(g, mem,
 		ctxsw_prog_main_image_zcull_ptr_o(), va);
 }

@@ -754,7 +754,7 @@ void gr_gk20a_write_pm_ptr(struct gk20a *g,
 {
 	u32 va = u64_lo32(gpu_va >> 8);

-	gk20a_mem_wr(g, mem,
+	nvgpu_mem_wr(g, mem,
 		ctxsw_prog_main_image_pm_ptr_o(), va);
 }

@@ -768,10 +768,10 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c)

 	gk20a_dbg_fn("");

-	if (gk20a_mem_begin(g, mem))
+	if (nvgpu_mem_begin(g, mem))
 		return -ENOMEM;

-	if (gk20a_mem_begin(g, ctxheader)) {
+	if (nvgpu_mem_begin(g, ctxheader)) {
 		ret = -ENOMEM;
 		goto clean_up_mem;
 	}
@@ -795,7 +795,7 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c)
 		goto clean_up;
 	}

-	gk20a_mem_wr(g, mem,
+	nvgpu_mem_wr(g, mem,
 		ctxsw_prog_main_image_zcull_o(),
 		ch_ctx->zcull_ctx.ctx_sw_mode);

@@ -808,9 +808,9 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c)
 	gk20a_enable_channel_tsg(g, c);

 clean_up:
-	gk20a_mem_end(g, ctxheader);
+	nvgpu_mem_end(g, ctxheader);
 clean_up_mem:
-	gk20a_mem_end(g, mem);
+	nvgpu_mem_end(g, mem);

 	return ret;
 }
@@ -1756,10 +1756,10 @@ restore_fe_go_idle:
 			goto restore_fe_go_idle;
 	}

-	if (gk20a_mem_begin(g, gold_mem))
+	if (nvgpu_mem_begin(g, gold_mem))
 		goto clean_up;

-	if (gk20a_mem_begin(g, gr_mem))
+	if (nvgpu_mem_begin(g, gr_mem))
 		goto clean_up;

 	ctx_header_words = roundup(ctx_header_bytes, sizeof(u32));
@@ -1768,26 +1768,26 @@ restore_fe_go_idle:
 	g->ops.mm.l2_flush(g, true);

 	for (i = 0; i < ctx_header_words; i++) {
-		data = gk20a_mem_rd32(g, gr_mem, i);
-		gk20a_mem_wr32(g, gold_mem, i, data);
+		data = nvgpu_mem_rd32(g, gr_mem, i);
+		nvgpu_mem_wr32(g, gold_mem, i, data);
 	}
-	gk20a_mem_wr(g, gold_mem, ctxsw_prog_main_image_zcull_o(),
+	nvgpu_mem_wr(g, gold_mem, ctxsw_prog_main_image_zcull_o(),
 		 ctxsw_prog_main_image_zcull_mode_no_ctxsw_v());

-	if (gk20a_mem_begin(g, ctxheader))
+	if (nvgpu_mem_begin(g, ctxheader))
 		goto clean_up;

 	if (ctxheader->gpu_va)
 		g->ops.gr.write_zcull_ptr(g, ctxheader, 0);
 	else
 		g->ops.gr.write_zcull_ptr(g, gold_mem, 0);
-	gk20a_mem_end(g, ctxheader);
+	nvgpu_mem_end(g, ctxheader);

 	g->ops.gr.commit_inst(c, ch_ctx->global_ctx_buffer_va[GOLDEN_CTX_VA]);

 	gr_gk20a_fecs_ctx_image_save(c, gr_fecs_method_push_adr_wfi_golden_save_v());

-	if (gk20a_mem_begin(g, ctxheader))
+	if (nvgpu_mem_begin(g, ctxheader))
 		goto clean_up;

 	if (gr->ctx_vars.local_golden_image == NULL) {
@@ -1801,15 +1801,15 @@ restore_fe_go_idle:
 		}

 		if (ctxheader->gpu_va)
-			gk20a_mem_rd_n(g, ctxheader, 0,
+			nvgpu_mem_rd_n(g, ctxheader, 0,
 				gr->ctx_vars.local_golden_image,
 				gr->ctx_vars.golden_image_size);
 		else
-			gk20a_mem_rd_n(g, gold_mem, 0,
+			nvgpu_mem_rd_n(g, gold_mem, 0,
 				gr->ctx_vars.local_golden_image,
 				gr->ctx_vars.golden_image_size);
 	}
-	gk20a_mem_end(g, ctxheader);
+	nvgpu_mem_end(g, ctxheader);

 	g->ops.gr.commit_inst(c, gr_mem->gpu_va);

@@ -1824,8 +1824,8 @@ clean_up:
 	else
 		gk20a_dbg_fn("done");

-	gk20a_mem_end(g, gold_mem);
-	gk20a_mem_end(g, gr_mem);
+	nvgpu_mem_end(g, gold_mem);
+	nvgpu_mem_end(g, gr_mem);

 	nvgpu_mutex_release(&gr->ctx_mutex);
 	return err;
@@ -1865,22 +1865,22 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
 	   Flush and invalidate before cpu update. */
 	g->ops.mm.l2_flush(g, true);

-	if (gk20a_mem_begin(g, mem)) {
+	if (nvgpu_mem_begin(g, mem)) {
 		ret = -ENOMEM;
 		goto out;
 	}

-	data = gk20a_mem_rd(g, mem,
+	data = nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_pm_o());
 	data = data & ~ctxsw_prog_main_image_pm_smpc_mode_m();
 	data |= enable_smpc_ctxsw ?
 		ctxsw_prog_main_image_pm_smpc_mode_ctxsw_f() :
 		ctxsw_prog_main_image_pm_smpc_mode_no_ctxsw_f();
-	gk20a_mem_wr(g, mem,
+	nvgpu_mem_wr(g, mem,
 			ctxsw_prog_main_image_pm_o(),
 			data);

-	gk20a_mem_end(g, mem);
+	nvgpu_mem_end(g, mem);

 out:
 	gk20a_enable_channel_tsg(g, c);
@@ -1964,27 +1964,27 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 		}

 		/* Now clear the buffer */
-		if (gk20a_mem_begin(g, &pm_ctx->mem)) {
+		if (nvgpu_mem_begin(g, &pm_ctx->mem)) {
 			ret = -ENOMEM;
 			goto cleanup_pm_buf;
 		}

-		gk20a_memset(g, &pm_ctx->mem, 0, 0, pm_ctx->mem.size);
+		nvgpu_memset(g, &pm_ctx->mem, 0, 0, pm_ctx->mem.size);

-		gk20a_mem_end(g, &pm_ctx->mem);
+		nvgpu_mem_end(g, &pm_ctx->mem);
 	}

-	if (gk20a_mem_begin(g, gr_mem)) {
+	if (nvgpu_mem_begin(g, gr_mem)) {
 		ret = -ENOMEM;
 		goto cleanup_pm_buf;
 	}

-	if (gk20a_mem_begin(g, ctxheader)) {
+	if (nvgpu_mem_begin(g, ctxheader)) {
 		ret = -ENOMEM;
 		goto clean_up_mem;
 	}

-	data = gk20a_mem_rd(g, gr_mem, ctxsw_prog_main_image_pm_o());
+	data = nvgpu_mem_rd(g, gr_mem, ctxsw_prog_main_image_pm_o());
 	data = data & ~ctxsw_prog_main_image_pm_mode_m();

 	if (enable_hwpm_ctxsw) {
@@ -1998,22 +1998,22 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,

 	data |= pm_ctx->pm_mode;

-	gk20a_mem_wr(g, gr_mem, ctxsw_prog_main_image_pm_o(), data);
+	nvgpu_mem_wr(g, gr_mem, ctxsw_prog_main_image_pm_o(), data);

 	if (ctxheader->gpu_va)
 		g->ops.gr.write_pm_ptr(g, ctxheader, virt_addr);
 	else
 		g->ops.gr.write_pm_ptr(g, gr_mem, virt_addr);

-	gk20a_mem_end(g, ctxheader);
-	gk20a_mem_end(g, gr_mem);
+	nvgpu_mem_end(g, ctxheader);
+	nvgpu_mem_end(g, gr_mem);

 	/* enable channel */
 	gk20a_enable_channel_tsg(g, c);

 	return 0;
 clean_up_mem:
-	gk20a_mem_end(g, gr_mem);
+	nvgpu_mem_end(g, gr_mem);
 cleanup_pm_buf:
 	gk20a_gmmu_unmap(c->vm, pm_ctx->mem.gpu_va, pm_ctx->mem.size,
 			gk20a_mem_flag_none);
@@ -2048,10 +2048,10 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 	   Flush and invalidate before cpu update. */
 	g->ops.mm.l2_flush(g, true);

-	if (gk20a_mem_begin(g, mem))
+	if (nvgpu_mem_begin(g, mem))
 		return -ENOMEM;

-	if (gk20a_mem_begin(g, ctxheader)) {
+	if (nvgpu_mem_begin(g, ctxheader)) {
 		ret = -ENOMEM;
 		goto clean_up_mem;
 	}
@@ -2060,12 +2060,12 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 		if (g->ops.gr.restore_context_header)
 			g->ops.gr.restore_context_header(g, ctxheader);
 	} else {
-		gk20a_mem_wr_n(g, mem, 0,
+		nvgpu_mem_wr_n(g, mem, 0,
 			gr->ctx_vars.local_golden_image,
 			gr->ctx_vars.golden_image_size);
-		gk20a_mem_wr(g, mem,
+		nvgpu_mem_wr(g, mem,
 			ctxsw_prog_main_image_num_save_ops_o(), 0);
-		gk20a_mem_wr(g, mem,
+		nvgpu_mem_wr(g, mem,
 			ctxsw_prog_main_image_num_restore_ops_o(), 0);
 	}

@@ -2083,29 +2083,29 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 	else
 		data = ctxsw_prog_main_image_priv_access_map_config_mode_use_map_f();

-	gk20a_mem_wr(g, mem, ctxsw_prog_main_image_priv_access_map_config_o(),
+	nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_priv_access_map_config_o(),
 		 data);

 	if (ctxheader->gpu_va) {
-		gk20a_mem_wr(g, ctxheader,
+		nvgpu_mem_wr(g, ctxheader,
 			ctxsw_prog_main_image_priv_access_map_addr_lo_o(),
 			virt_addr_lo);
-		gk20a_mem_wr(g, ctxheader,
+		nvgpu_mem_wr(g, ctxheader,
 			ctxsw_prog_main_image_priv_access_map_addr_hi_o(),
 			virt_addr_hi);
 	} else {
-		gk20a_mem_wr(g, mem,
+		nvgpu_mem_wr(g, mem,
 			ctxsw_prog_main_image_priv_access_map_addr_lo_o(),
 			virt_addr_lo);
-		gk20a_mem_wr(g, mem,
+		nvgpu_mem_wr(g, mem,
 			ctxsw_prog_main_image_priv_access_map_addr_hi_o(),
 			virt_addr_hi);
 	}
 	/* disable verif features */
-	v = gk20a_mem_rd(g, mem, ctxsw_prog_main_image_misc_options_o());
+	v = nvgpu_mem_rd(g, mem, ctxsw_prog_main_image_misc_options_o());
 	v = v & ~(ctxsw_prog_main_image_misc_options_verif_features_m());
 	v = v | ctxsw_prog_main_image_misc_options_verif_features_disabled_f();
-	gk20a_mem_wr(g, mem, ctxsw_prog_main_image_misc_options_o(), v);
+	nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_misc_options_o(), v);

 	if (g->ops.gr.update_ctxsw_preemption_mode)
 		g->ops.gr.update_ctxsw_preemption_mode(g, ch_ctx, mem);
@@ -2116,26 +2116,26 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 	virt_addr_lo = u64_lo32(ch_ctx->patch_ctx.mem.gpu_va);
 	virt_addr_hi = u64_hi32(ch_ctx->patch_ctx.mem.gpu_va);

-	gk20a_mem_wr(g, mem, ctxsw_prog_main_image_patch_count_o(),
+	nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_patch_count_o(),
 		 ch_ctx->patch_ctx.data_count);
-	gk20a_mem_wr(g, mem, ctxsw_prog_main_image_patch_adr_lo_o(),
+	nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_patch_adr_lo_o(),
 		 virt_addr_lo);
-	gk20a_mem_wr(g, mem, ctxsw_prog_main_image_patch_adr_hi_o(),
+	nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_patch_adr_hi_o(),
 		 virt_addr_hi);

 	if (ctxheader->gpu_va) {
-		gk20a_mem_wr(g, ctxheader,
+		nvgpu_mem_wr(g, ctxheader,
 			ctxsw_prog_main_image_patch_count_o(),
 			ch_ctx->patch_ctx.data_count);
-		gk20a_mem_wr(g, ctxheader,
+		nvgpu_mem_wr(g, ctxheader,
 			ctxsw_prog_main_image_patch_adr_lo_o(),
 			virt_addr_lo);
-		gk20a_mem_wr(g, ctxheader,
+		nvgpu_mem_wr(g, ctxheader,
 			ctxsw_prog_main_image_patch_adr_hi_o(),
 			virt_addr_hi);
 	}

-	gk20a_mem_wr(g, mem, ctxsw_prog_main_image_zcull_o(),
+	nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_zcull_o(),
 		 ch_ctx->zcull_ctx.ctx_sw_mode);

 	if (ctxheader->gpu_va)
@@ -2153,7 +2153,7 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 		if (ch_ctx->pm_ctx.mem.gpu_va == 0) {
 			gk20a_err(dev_from_gk20a(g),
 				"context switched pm with no pm buffer!");
-			gk20a_mem_end(g, mem);
+			nvgpu_mem_end(g, mem);
 			return -EFAULT;
 		}

@@ -2161,11 +2161,11 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 	} else
 		virt_addr = 0;

-	data = gk20a_mem_rd(g, mem, ctxsw_prog_main_image_pm_o());
+	data = nvgpu_mem_rd(g, mem, ctxsw_prog_main_image_pm_o());
 	data = data & ~ctxsw_prog_main_image_pm_mode_m();
 	data |= ch_ctx->pm_ctx.pm_mode;

-	gk20a_mem_wr(g, mem, ctxsw_prog_main_image_pm_o(), data);
+	nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_pm_o(), data);

 	if (ctxheader->gpu_va)
 		g->ops.gr.write_pm_ptr(g, ctxheader, virt_addr);
@@ -2173,9 +2173,9 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 		g->ops.gr.write_pm_ptr(g, mem, virt_addr);


-	gk20a_mem_end(g, ctxheader);
+	nvgpu_mem_end(g, ctxheader);
 clean_up_mem:
-	gk20a_mem_end(g, mem);
+	nvgpu_mem_end(g, mem);

 	return ret;
 }
@@ -2256,11 +2256,11 @@ static int gr_gk20a_copy_ctxsw_ucode_segments(
 {
 	unsigned int i;

-	gk20a_mem_wr_n(g, dst, segments->boot.offset, bootimage,
+	nvgpu_mem_wr_n(g, dst, segments->boot.offset, bootimage,
 		segments->boot.size);
-	gk20a_mem_wr_n(g, dst, segments->code.offset, code,
+	nvgpu_mem_wr_n(g, dst, segments->code.offset, code,
 		segments->code.size);
-	gk20a_mem_wr_n(g, dst, segments->data.offset, data,
+	nvgpu_mem_wr_n(g, dst, segments->data.offset, data,
 		segments->data.size);

 	/* compute a "checksum" for the boot binary to detect its version */
@@ -2382,14 +2382,14 @@ void gr_gk20a_load_falcon_bind_instblk(struct gk20a *g)
 	inst_ptr = gk20a_mm_inst_block_addr(g, &ucode_info->inst_blk_desc);
 	gk20a_writel(g, gr_fecs_new_ctx_r(),
 		gr_fecs_new_ctx_ptr_f(inst_ptr >> 12) |
-		gk20a_aperture_mask(g, &ucode_info->inst_blk_desc,
+		nvgpu_aperture_mask(g, &ucode_info->inst_blk_desc,
 			gr_fecs_new_ctx_target_sys_mem_ncoh_f(),
 			gr_fecs_new_ctx_target_vid_mem_f()) |
 		gr_fecs_new_ctx_valid_m());

 	gk20a_writel(g, gr_fecs_arb_ctx_ptr_r(),
 		gr_fecs_arb_ctx_ptr_ptr_f(inst_ptr >> 12) |
-		gk20a_aperture_mask(g, &ucode_info->inst_blk_desc,
+		nvgpu_aperture_mask(g, &ucode_info->inst_blk_desc,
 			gr_fecs_arb_ctx_ptr_target_sys_mem_ncoh_f(),
 			gr_fecs_arb_ctx_ptr_target_vid_mem_f()));

@@ -4748,7 +4748,7 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
 	addr >>= fb_mmu_debug_wr_addr_alignment_v();

 	gk20a_writel(g, fb_mmu_debug_wr_r(),
-		gk20a_aperture_mask(g, &gr->mmu_wr_mem,
+		nvgpu_aperture_mask(g, &gr->mmu_wr_mem,
 			fb_mmu_debug_wr_aperture_sys_mem_ncoh_f(),
 			fb_mmu_debug_wr_aperture_vid_mem_f()) |
 		fb_mmu_debug_wr_vol_false_f() |
@@ -4758,7 +4758,7 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
 	addr >>= fb_mmu_debug_rd_addr_alignment_v();

 	gk20a_writel(g, fb_mmu_debug_rd_r(),
-		gk20a_aperture_mask(g, &gr->mmu_rd_mem,
+		nvgpu_aperture_mask(g, &gr->mmu_rd_mem,
 			fb_mmu_debug_wr_aperture_sys_mem_ncoh_f(),
 			fb_mmu_debug_rd_aperture_vid_mem_f()) |
 		fb_mmu_debug_rd_vol_false_f() |
@@ -5092,13 +5092,13 @@ static int gr_gk20a_init_access_map(struct gk20a *g)
 	u32 *whitelist = NULL;
 	unsigned int num_entries = 0;

-	if (gk20a_mem_begin(g, mem)) {
+	if (nvgpu_mem_begin(g, mem)) {
 		gk20a_err(dev_from_gk20a(g),
 			  "failed to map priv access map memory");
 		return -ENOMEM;
 	}

-	gk20a_memset(g, mem, 0, 0, PAGE_SIZE * nr_pages);
+	nvgpu_memset(g, mem, 0, 0, PAGE_SIZE * nr_pages);

 	g->ops.gr.get_access_map(g, &whitelist, &num_entries);

@@ -5109,14 +5109,14 @@ static int gr_gk20a_init_access_map(struct gk20a *g)
 		map_shift = map_bit & 0x7; /* i.e. 0-7 */
 		gk20a_dbg_info("access map addr:0x%x byte:0x%x bit:%d",
 			       whitelist[w], map_byte, map_shift);
-		x = gk20a_mem_rd32(g, mem, map_byte / sizeof(u32));
+		x = nvgpu_mem_rd32(g, mem, map_byte / sizeof(u32));
 		x |= 1 << (
 			   (map_byte % sizeof(u32) * BITS_PER_BYTE)
 			    + map_shift);
-		gk20a_mem_wr32(g, mem, map_byte / sizeof(u32), x);
+		nvgpu_mem_wr32(g, mem, map_byte / sizeof(u32), x);
 	}

-	gk20a_mem_end(g, mem);
+	nvgpu_mem_end(g, mem);
 	return 0;
 }

@@ -7160,7 +7160,7 @@ static int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
 			/* reset the patch count from previous
 			   runs,if ucode has already processed
 			   it */
-			tmp = gk20a_mem_rd(g, mem,
+			tmp = nvgpu_mem_rd(g, mem,
 			       ctxsw_prog_main_image_patch_count_o());

 			if (!tmp)
@@ -7172,13 +7172,13 @@ static int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
 			vaddr_lo = u64_lo32(ch_ctx->patch_ctx.mem.gpu_va);
 			vaddr_hi = u64_hi32(ch_ctx->patch_ctx.mem.gpu_va);

-			gk20a_mem_wr(g, mem,
+			nvgpu_mem_wr(g, mem,
 				 ctxsw_prog_main_image_patch_count_o(),
 				 ch_ctx->patch_ctx.data_count);
-			gk20a_mem_wr(g, mem,
+			nvgpu_mem_wr(g, mem,
 				 ctxsw_prog_main_image_patch_adr_lo_o(),
 				 vaddr_lo);
-			gk20a_mem_wr(g, mem,
+			nvgpu_mem_wr(g, mem,
 				 ctxsw_prog_main_image_patch_adr_hi_o(),
 				 vaddr_hi);

@@ -8393,7 +8393,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 		 * gr_gk20a_apply_instmem_overrides,
 		 * recoded in-place instead.
 		 */
-		if (gk20a_mem_begin(g, &ch_ctx->gr_ctx->mem)) {
+		if (nvgpu_mem_begin(g, &ch_ctx->gr_ctx->mem)) {
 			err = -ENOMEM;
 			goto cleanup;
 		}
@@ -8422,7 +8422,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 					err = -EINVAL;
 					goto cleanup;
 				}
-				if (gk20a_mem_begin(g, &ch_ctx->pm_ctx.mem)) {
+				if (nvgpu_mem_begin(g, &ch_ctx->pm_ctx.mem)) {
 					err = -ENOMEM;
 					goto cleanup;
 				}
@@ -8445,20 +8445,20 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 			    (offsets[j] >= g->gr.ctx_vars.golden_image_size))
 				continue;
 			if (pass == 0) { /* write pass */
-				v = gk20a_mem_rd(g, current_mem, offsets[j]);
+				v = nvgpu_mem_rd(g, current_mem, offsets[j]);
 				v &= ~ctx_ops[i].and_n_mask_lo;
 				v |= ctx_ops[i].value_lo;
-				gk20a_mem_wr(g, current_mem, offsets[j], v);
+				nvgpu_mem_wr(g, current_mem, offsets[j], v);

 				gk20a_dbg(gpu_dbg_gpu_dbg,
 					   "context wr: offset=0x%x v=0x%x",
 					   offsets[j], v);

 				if (ctx_ops[i].op == REGOP(WRITE_64)) {
-					v = gk20a_mem_rd(g, current_mem, offsets[j] + 4);
+					v = nvgpu_mem_rd(g, current_mem, offsets[j] + 4);
 					v &= ~ctx_ops[i].and_n_mask_hi;
 					v |= ctx_ops[i].value_hi;
-					gk20a_mem_wr(g, current_mem, offsets[j] + 4, v);
+					nvgpu_mem_wr(g, current_mem, offsets[j] + 4, v);

 					gk20a_dbg(gpu_dbg_gpu_dbg,
 						   "context wr: offset=0x%x v=0x%x",
@@ -8472,14 +8472,14 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,

 			} else { /* read pass */
 				ctx_ops[i].value_lo =
-					gk20a_mem_rd(g, current_mem, offsets[0]);
+					nvgpu_mem_rd(g, current_mem, offsets[0]);

 				gk20a_dbg(gpu_dbg_gpu_dbg, "context rd: offset=0x%x v=0x%x",
 					   offsets[0], ctx_ops[i].value_lo);

 				if (ctx_ops[i].op == REGOP(READ_64)) {
 					ctx_ops[i].value_hi =
-						gk20a_mem_rd(g, current_mem, offsets[0] + 4);
+						nvgpu_mem_rd(g, current_mem, offsets[0] + 4);

 					gk20a_dbg(gpu_dbg_gpu_dbg,
 						   "context rd: offset=0x%x v=0x%x",
@@ -8507,9 +8507,9 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 	if (ch_ctx->patch_ctx.mem.cpu_va)
 		gr_gk20a_ctx_patch_write_end(g, ch_ctx);
 	if (gr_ctx_ready)
-		gk20a_mem_end(g, &ch_ctx->gr_ctx->mem);
+		nvgpu_mem_end(g, &ch_ctx->gr_ctx->mem);
 	if (pm_ctx_ready)
-		gk20a_mem_end(g, &ch_ctx->pm_ctx.mem);
+		nvgpu_mem_end(g, &ch_ctx->pm_ctx.mem);

 	if (restart_gr_ctxsw) {
 		int tmp_err = gr_gk20a_enable_ctxsw(g);
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index e78eb941..9c9fad1b 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -34,6 +34,7 @@
34#include <nvgpu/timers.h> 34#include <nvgpu/timers.h>
35#include <nvgpu/pramin.h> 35#include <nvgpu/pramin.h>
36#include <nvgpu/list.h> 36#include <nvgpu/list.h>
37#include <nvgpu/mem_desc.h>
37#include <nvgpu/allocator.h> 38#include <nvgpu/allocator.h>
38#include <nvgpu/semaphore.h> 39#include <nvgpu/semaphore.h>
39#include <nvgpu/page_allocator.h> 40#include <nvgpu/page_allocator.h>
@@ -139,7 +140,7 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
139 bool umapped_pte, int rw_flag, 140 bool umapped_pte, int rw_flag,
140 bool sparse, 141 bool sparse,
141 bool priv, 142 bool priv,
142 enum gk20a_aperture aperture); 143 enum nvgpu_aperture aperture);
143static int __must_check gk20a_init_system_vm(struct mm_gk20a *mm); 144static int __must_check gk20a_init_system_vm(struct mm_gk20a *mm);
144static int __must_check gk20a_init_bar1_vm(struct mm_gk20a *mm); 145static int __must_check gk20a_init_bar1_vm(struct mm_gk20a *mm);
145static int __must_check gk20a_init_hwpm(struct mm_gk20a *mm); 146static int __must_check gk20a_init_hwpm(struct mm_gk20a *mm);
@@ -945,7 +946,7 @@ int map_gmmu_pages(struct gk20a *g, struct gk20a_mm_entry *entry)
945 sg_phys(entry->mem.sgt->sgl), 946 sg_phys(entry->mem.sgt->sgl),
946 entry->mem.size); 947 entry->mem.size);
947 } else { 948 } else {
948 int err = gk20a_mem_begin(g, &entry->mem); 949 int err = nvgpu_mem_begin(g, &entry->mem);
949 950
950 if (err) 951 if (err)
951 return err; 952 return err;
@@ -971,7 +972,7 @@ void unmap_gmmu_pages(struct gk20a *g, struct gk20a_mm_entry *entry)
971 sg_phys(entry->mem.sgt->sgl), 972 sg_phys(entry->mem.sgt->sgl),
972 entry->mem.size); 973 entry->mem.size);
973 } else { 974 } else {
974 gk20a_mem_end(g, &entry->mem); 975 nvgpu_mem_end(g, &entry->mem);
975 } 976 }
976} 977}
977 978
@@ -1510,7 +1511,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
1510 bool sparse, 1511 bool sparse,
1511 bool priv, 1512 bool priv,
1512 struct vm_gk20a_mapping_batch *batch, 1513 struct vm_gk20a_mapping_batch *batch,
1513 enum gk20a_aperture aperture) 1514 enum nvgpu_aperture aperture)
1514{ 1515{
1515 int err = 0; 1516 int err = 0;
1516 bool allocated = false; 1517 bool allocated = false;
@@ -1543,7 +1544,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
1543 sgt ? lo32((u64)sg_phys(sgt->sgl)) : 0, 1544 sgt ? lo32((u64)sg_phys(sgt->sgl)) : 0,
1544 vm->gmmu_page_sizes[pgsz_idx] >> 10, vm_aspace_id(vm), 1545 vm->gmmu_page_sizes[pgsz_idx] >> 10, vm_aspace_id(vm),
1545 ctag_lines, ctag_offset, 1546 ctag_lines, ctag_offset,
1546 kind_v, flags, gk20a_aperture_str(aperture)); 1547 kind_v, flags, nvgpu_aperture_str(aperture));
1547 1548
1548 err = update_gmmu_ptes_locked(vm, pgsz_idx, 1549 err = update_gmmu_ptes_locked(vm, pgsz_idx,
1549 sgt, 1550 sgt,
@@ -1634,7 +1635,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
         }
 }

-static enum gk20a_aperture gk20a_dmabuf_aperture(struct gk20a *g,
+static enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g,
                        struct dma_buf *dmabuf)
 {
         struct gk20a *buf_owner = gk20a_vidmem_buf_owner(dmabuf);
@@ -1723,7 +1724,7 @@ static u64 gk20a_vm_map_duplicate_locked(struct vm_gk20a *vm,
                vm_aspace_id(vm),
                mapped_buffer->ctag_lines, mapped_buffer->ctag_offset,
                mapped_buffer->flags,
-               gk20a_aperture_str(gk20a_dmabuf_aperture(g, dmabuf)));
+               nvgpu_aperture_str(gk20a_dmabuf_aperture(g, dmabuf)));

         if (sgt)
                 *sgt = mapped_buffer->sgt;
@@ -1941,11 +1942,11 @@ int gk20a_vidbuf_access_memory(struct gk20a *g, struct dma_buf *dmabuf,

         switch (cmd) {
         case NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_READ:
-                gk20a_mem_rd_n(g, mem, offset, buffer, size);
+                nvgpu_mem_rd_n(g, mem, offset, buffer, size);
                 break;

         case NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_WRITE:
-                gk20a_mem_wr_n(g, mem, offset, buffer, size);
+                nvgpu_mem_wr_n(g, mem, offset, buffer, size);
                 break;

         default:
@@ -1959,7 +1960,7 @@ int gk20a_vidbuf_access_memory(struct gk20a *g, struct dma_buf *dmabuf,
1959} 1960}
1960 1961
1961static u64 gk20a_mm_get_align(struct gk20a *g, struct scatterlist *sgl, 1962static u64 gk20a_mm_get_align(struct gk20a *g, struct scatterlist *sgl,
1962 enum gk20a_aperture aperture) 1963 enum nvgpu_aperture aperture)
1963{ 1964{
1964 u64 align = 0, chunk_align = 0; 1965 u64 align = 0, chunk_align = 0;
1965 u64 buf_addr; 1966 u64 buf_addr;
@@ -2030,7 +2031,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
         u32 ctag_map_win_ctagline = 0;
         struct vm_reserved_va_node *va_node = NULL;
         u32 ctag_offset;
-        enum gk20a_aperture aperture;
+        enum nvgpu_aperture aperture;

         if (user_mapped && vm->userspace_managed &&
             !(flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)) {
@@ -2462,7 +2463,7 @@ static u64 __gk20a_gmmu_map(struct vm_gk20a *vm,
                        u32 flags,
                        int rw_flag,
                        bool priv,
-                       enum gk20a_aperture aperture)
+                       enum nvgpu_aperture aperture)
 {
         struct gk20a *g = gk20a_from_vm(vm);
         u64 vaddr;
@@ -2496,7 +2497,7 @@ u64 gk20a_gmmu_map(struct vm_gk20a *vm,
                u32 flags,
                int rw_flag,
                bool priv,
-               enum gk20a_aperture aperture)
+               enum nvgpu_aperture aperture)
 {
         return __gk20a_gmmu_map(vm, sgt, 0, size, flags, rw_flag, priv,
                        aperture);
@@ -2512,7 +2513,7 @@ u64 gk20a_gmmu_fixed_map(struct vm_gk20a *vm,
                u32 flags,
                int rw_flag,
                bool priv,
-               enum gk20a_aperture aperture)
+               enum nvgpu_aperture aperture)
 {
         return __gk20a_gmmu_map(vm, sgt, addr, size, flags, rw_flag, priv,
                        aperture);
@@ -2851,7 +2852,7 @@ static void gk20a_gmmu_free_vid(struct gk20a *g, struct mem_desc *mem)
                        schedule_work(&g->mm.vidmem.clear_mem_worker);
                 }
         } else {
-                gk20a_memset(g, mem, 0, 0, mem->size);
+                nvgpu_memset(g, mem, 0, 0, mem->size);
                 nvgpu_free(mem->allocator,
                            (u64)get_vidmem_page_alloc(mem->sgt->sgl));
                 gk20a_free_sgtable(g, &mem->sgt);
@@ -3170,7 +3171,7 @@ u64 gk20a_mm_iova_addr(struct gk20a *g, struct scatterlist *sgl,
 void gk20a_pde_wr32(struct gk20a *g, struct gk20a_mm_entry *entry,
                size_t w, size_t data)
 {
-        gk20a_mem_wr32(g, &entry->mem, entry->woffset + w, data);
+        nvgpu_mem_wr32(g, &entry->mem, entry->woffset + w, data);
 }

 u64 gk20a_pde_addr(struct gk20a *g, struct gk20a_mm_entry *entry)
@@ -3191,7 +3192,7 @@ static inline u32 big_valid_pde0_bits(struct gk20a *g,
 {
         u64 pte_addr = gk20a_pde_addr(g, entry);
         u32 pde0_bits =
-                gk20a_aperture_mask(g, &entry->mem,
+                nvgpu_aperture_mask(g, &entry->mem,
                        gmmu_pde_aperture_big_sys_mem_ncoh_f(),
                        gmmu_pde_aperture_big_video_memory_f()) |
                 gmmu_pde_address_big_sys_f(
@@ -3205,7 +3206,7 @@ static inline u32 small_valid_pde1_bits(struct gk20a *g,
 {
         u64 pte_addr = gk20a_pde_addr(g, entry);
         u32 pde1_bits =
-                gk20a_aperture_mask(g, &entry->mem,
+                nvgpu_aperture_mask(g, &entry->mem,
                        gmmu_pde_aperture_small_sys_mem_ncoh_f(),
                        gmmu_pde_aperture_small_video_memory_f()) |
                 gmmu_pde_vol_small_true_f() | /* tbd: why? */
@@ -3230,7 +3231,7 @@ static int update_gmmu_pde_locked(struct vm_gk20a *vm,
                u32 kind_v, u64 *ctag,
                bool cacheable, bool unammped_pte,
                int rw_flag, bool sparse, bool priv,
-               enum gk20a_aperture aperture)
+               enum nvgpu_aperture aperture)
 {
         struct gk20a *g = gk20a_from_vm(vm);
         bool small_valid, big_valid;
@@ -3275,7 +3276,7 @@ static int update_gmmu_pte_locked(struct vm_gk20a *vm,
                u32 kind_v, u64 *ctag,
                bool cacheable, bool unmapped_pte,
                int rw_flag, bool sparse, bool priv,
-               enum gk20a_aperture aperture)
+               enum nvgpu_aperture aperture)
 {
         struct gk20a *g = gk20a_from_vm(vm);
         int ctag_shift = ilog2(g->ops.fb.compression_page_size(g));
@@ -3296,7 +3297,7 @@ static int update_gmmu_pte_locked(struct vm_gk20a *vm,
                if (priv)
                         pte_w[0] |= gmmu_pte_privilege_true_f();

-                pte_w[1] = __gk20a_aperture_mask(g, aperture,
+                pte_w[1] = __nvgpu_aperture_mask(g, aperture,
                                gmmu_pte_aperture_sys_mem_ncoh_f(),
                                gmmu_pte_aperture_video_memory_f()) |
                         gmmu_pte_kind_f(kind_v) |
@@ -3379,7 +3380,7 @@ static int update_gmmu_level_locked(struct vm_gk20a *vm,
                        bool sparse,
                        int lvl,
                        bool priv,
-                       enum gk20a_aperture aperture)
+                       enum nvgpu_aperture aperture)
 {
         struct gk20a *g = gk20a_from_vm(vm);
         const struct gk20a_mmu_level *l = &vm->mmu_levels[lvl];
@@ -3477,7 +3478,7 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
                        int rw_flag,
                        bool sparse,
                        bool priv,
-                       enum gk20a_aperture aperture)
+                       enum nvgpu_aperture aperture)
 {
         struct gk20a *g = gk20a_from_vm(vm);
         int ctag_granularity = g->ops.fb.compression_page_size(g);
@@ -4735,14 +4736,14 @@ void gk20a_mm_init_pdb(struct gk20a *g, struct mem_desc *inst_block,

         gk20a_dbg_info("pde pa=0x%llx", pdb_addr);

-        gk20a_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(),
-                gk20a_aperture_mask(g, &vm->pdb.mem,
+        nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(),
+                nvgpu_aperture_mask(g, &vm->pdb.mem,
                        ram_in_page_dir_base_target_sys_mem_ncoh_f(),
                        ram_in_page_dir_base_target_vid_mem_f()) |
                 ram_in_page_dir_base_vol_true_f() |
                 ram_in_page_dir_base_lo_f(pdb_addr_lo));

-        gk20a_mem_wr32(g, inst_block, ram_in_page_dir_base_hi_w(),
+        nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_hi_w(),
                 ram_in_page_dir_base_hi_f(pdb_addr_hi));
 }

@@ -4756,10 +4757,10 @@ void gk20a_init_inst_block(struct mem_desc *inst_block, struct vm_gk20a *vm,

         g->ops.mm.init_pdb(g, inst_block, vm);

-        gk20a_mem_wr32(g, inst_block, ram_in_adr_limit_lo_w(),
+        nvgpu_mem_wr32(g, inst_block, ram_in_adr_limit_lo_w(),
                 u64_lo32(vm->va_limit - 1) & ~0xfff);

-        gk20a_mem_wr32(g, inst_block, ram_in_adr_limit_hi_w(),
+        nvgpu_mem_wr32(g, inst_block, ram_in_adr_limit_hi_w(),
                 ram_in_adr_limit_hi_f(u64_hi32(vm->va_limit - 1)));

         if (big_page_size && g->ops.mm.set_big_page_size)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index da8bbb0a..3c701907 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -220,7 +220,7 @@ struct gk20a_mmu_level {
                        u32 kind_v, u64 *ctag,
                        bool cacheable, bool unmapped_pte,
                        int rw_flag, bool sparse, bool priv,
-                       enum gk20a_aperture aperture);
+                       enum nvgpu_aperture aperture);
         size_t entry_size;
 };

@@ -514,7 +514,7 @@ u64 gk20a_gmmu_map(struct vm_gk20a *vm,
                u32 flags,
                int rw_flag,
                bool priv,
-               enum gk20a_aperture aperture);
+               enum nvgpu_aperture aperture);
 u64 gk20a_gmmu_fixed_map(struct vm_gk20a *vm,
                struct sg_table **sgt,
                u64 addr,
@@ -522,7 +522,7 @@ u64 gk20a_gmmu_fixed_map(struct vm_gk20a *vm,
                u32 flags,
                int rw_flag,
                bool priv,
-               enum gk20a_aperture aperture);
+               enum nvgpu_aperture aperture);

 /* Flags for the below gk20a_gmmu_{alloc,alloc_map}_flags* */

@@ -589,9 +589,9 @@ static inline phys_addr_t gk20a_mem_phys(struct mem_desc *mem)
         return 0;
 }

-u32 __gk20a_aperture_mask(struct gk20a *g, enum gk20a_aperture aperture,
+u32 __nvgpu_aperture_mask(struct gk20a *g, enum nvgpu_aperture aperture,
                u32 sysmem_mask, u32 vidmem_mask);
-u32 gk20a_aperture_mask(struct gk20a *g, struct mem_desc *mem,
+u32 nvgpu_aperture_mask(struct gk20a *g, struct mem_desc *mem,
                u32 sysmem_mask, u32 vidmem_mask);

 void gk20a_pde_wr32(struct gk20a *g, struct gk20a_mm_entry *entry,
@@ -612,7 +612,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
                bool sparse,
                bool priv,
                struct vm_gk20a_mapping_batch *batch,
-               enum gk20a_aperture aperture);
+               enum nvgpu_aperture aperture);

 void gk20a_gmmu_unmap(struct vm_gk20a *vm,
                u64 vaddr,
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 3297d376..e70e50c2 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -307,7 +307,7 @@ static void printtrace(struct pmu_gk20a *pmu)
                return;

         /* read pmu traces into system memory buffer */
-        gk20a_mem_rd_n(g, &pmu->trace_buf,
+        nvgpu_mem_rd_n(g, &pmu->trace_buf,
                 0, tracebuffer, GK20A_PMU_TRACE_BUFSIZE);

         trace = (char *)tracebuffer;
@@ -3155,7 +3155,7 @@ static int gk20a_prepare_ucode(struct gk20a *g)
         if (err)
                 goto err_release_fw;

-        gk20a_mem_wr_n(g, &pmu->ucode, 0, pmu->ucode_image,
+        nvgpu_mem_wr_n(g, &pmu->ucode, 0, pmu->ucode_image,
                 pmu->desc->app_start_offset + pmu->desc->app_size);

         return gk20a_init_pmu(pmu);
@@ -4872,7 +4872,7 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
                                (struct flcn_mem_desc_v0 *)
                                pv->pmu_allocation_get_fb_addr(pmu, in));

-                        gk20a_mem_wr_n(g, seq->in_mem, 0,
+                        nvgpu_mem_wr_n(g, seq->in_mem, 0,
                                payload->in.buf, payload->in.fb_size);

                 } else {
@@ -5736,7 +5736,7 @@ static int falc_trace_show(struct seq_file *s, void *data)
                return -ENOMEM;

         /* read pmu traces into system memory buffer */
-        gk20a_mem_rd_n(g, &pmu->trace_buf,
+        nvgpu_mem_rd_n(g, &pmu->trace_buf,
                 0, tracebuffer, GK20A_PMU_TRACE_BUFSIZE);

         trace = (char *)tracebuffer;
diff --git a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
index bed2e9b5..7e6005a2 100644
--- a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
@@ -32,7 +32,7 @@ static u32 gk20a_pramin_enter(struct gk20a *g, struct mem_desc *mem,
         u32 hi = (u32)((addr & ~(u64)0xfffff)
                 >> bus_bar0_window_target_bar0_window_base_shift_v());
         u32 lo = (u32)(addr & 0xfffff);
-        u32 win = gk20a_aperture_mask(g, mem,
+        u32 win = nvgpu_aperture_mask(g, mem,
                 bus_bar0_window_target_sys_mem_noncoherent_f(),
                 bus_bar0_window_target_vid_mem_f()) |
                 bus_bar0_window_base_f(hi);