author      Alex Waterman <alexw@nvidia.com>                      2017-03-15 19:42:12 -0400
committer   mobile promotions <svcmobile_promotions@nvidia.com>   2017-04-06 21:14:48 -0400
commit      b69020bff5dfa69cad926c9374cdbe9a62509ffd (patch)
tree        222f6b6bc23561a38004a257cbac401e431ff3be /drivers
parent      fa4ecf5730a75269e85cc41c2ad2ee61307e72a9 (diff)
gpu: nvgpu: Rename gk20a_mem_* functions
Rename the functions used for mem_desc access to nvgpu_mem_*.

JIRA NVGPU-12

Change-Id: Ibfdc1112d43f0a125e4487c250e3f977ffd2cd75
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1323325
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
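Note: the change is a mechanical rename; every accessor keeps its signature and only the gk20a_ prefix becomes nvgpu_ (gk20a_mem_rd32 -> nvgpu_mem_rd32, gk20a_memset -> nvgpu_memset, gk20a_aperture_mask -> nvgpu_aperture_mask, and so on). A minimal before/after sketch of the call pattern, using a hypothetical helper around an arbitrary mem_desc (the helper name is illustrative, the accessor signatures are taken from the diff below):

/* Illustrative sketch only: example_toggle_bit() is not part of this change. */
static void example_toggle_bit(struct gk20a *g, struct mem_desc *mem)
{
	u32 v;

	if (nvgpu_mem_begin(g, mem))		/* was gk20a_mem_begin() */
		return;
	v = nvgpu_mem_rd32(g, mem, 0);		/* was gk20a_mem_rd32() */
	nvgpu_mem_wr32(g, mem, 0, v ^ 0x1);	/* was gk20a_mem_wr32() */
	nvgpu_mem_end(g, mem);			/* was gk20a_mem_end() */
}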
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/nvgpu/boardobj/boardobjgrp.c      |   6
-rw-r--r--  drivers/gpu/nvgpu/common/linux/mem_desc.c     |  28
-rw-r--r--  drivers/gpu/nvgpu/gk20a/bus_gk20a.c           |   2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c       |   8
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c  |  54
-rw-r--r--  drivers/gpu/nvgpu/gk20a/debug_gk20a.c         |   2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fb_gk20a.c            |   2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c    |  12
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c          |  58
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.h               |   2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c            | 190
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c            |  57
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.h            |  12
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pmu_gk20a.c           |   8
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pramin_gk20a.c        |   2
-rw-r--r--  drivers/gpu/nvgpu/gm20b/acr_gm20b.c           |  14
-rw-r--r--  drivers/gpu/nvgpu/gm20b/bus_gm20b.c           |   2
-rw-r--r--  drivers/gpu/nvgpu/gm20b/fifo_gm20b.c          |   2
-rw-r--r--  drivers/gpu/nvgpu/gm20b/gr_gm20b.c            |  14
-rw-r--r--  drivers/gpu/nvgpu/gm20b/mm_gm20b.c            |   4
-rw-r--r--  drivers/gpu/nvgpu/gp106/acr_gp106.c           |  12
-rw-r--r--  drivers/gpu/nvgpu/gp106/sec2_gp106.c          |   4
-rw-r--r--  drivers/gpu/nvgpu/gp10b/fifo_gp10b.c          |  40
-rw-r--r--  drivers/gpu/nvgpu/gp10b/gr_gp10b.c            |  44
-rw-r--r--  drivers/gpu/nvgpu/gp10b/mm_gp10b.c            |  22
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/mem_desc.h    |  30
-rw-r--r--  drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c  |   2
-rw-r--r--  drivers/gpu/nvgpu/vgpu/mm_vgpu.c              |   2
28 files changed, 318 insertions(+), 317 deletions(-)
diff --git a/drivers/gpu/nvgpu/boardobj/boardobjgrp.c b/drivers/gpu/nvgpu/boardobj/boardobjgrp.c
index 43928ac1..d193861a 100644
--- a/drivers/gpu/nvgpu/boardobj/boardobjgrp.c
+++ b/drivers/gpu/nvgpu/boardobj/boardobjgrp.c
@@ -409,7 +409,7 @@ u32 boardobjgrp_pmuset_impl(struct gk20a *g, struct boardobjgrp *pboardobjgrp)
 gk20a_pmu_vidmem_surface_alloc(g, &pcmd->surf.vidmem_desc,
 pcmd->fbsize);
 }
-gk20a_mem_wr_n(g, &pcmd->surf.vidmem_desc, 0, pcmd->buf, pcmd->fbsize);
+nvgpu_mem_wr_n(g, &pcmd->surf.vidmem_desc, 0, pcmd->buf, pcmd->fbsize);
 
 /* Send the SET PMU CMD to the PMU */
 status = boardobjgrp_pmucmdsend(g, pboardobjgrp,
@@ -488,7 +488,7 @@ boardobjgrp_pmugetstatus_impl(struct gk20a *g, struct boardobjgrp *pboardobjgrp,
 goto boardobjgrp_pmugetstatus_exit;
 }
 
-gk20a_mem_wr_n(g, &pcmd->surf.vidmem_desc, 0, pset->buf, pset->hdrsize);
+nvgpu_mem_wr_n(g, &pcmd->surf.vidmem_desc, 0, pset->buf, pset->hdrsize);
 /* Send the GET_STATUS PMU CMD to the PMU */
 status = boardobjgrp_pmucmdsend(g, pboardobjgrp,
 &pboardobjgrp->pmu.getstatus);
@@ -499,7 +499,7 @@ boardobjgrp_pmugetstatus_impl(struct gk20a *g, struct boardobjgrp *pboardobjgrp,
 }
 
 /*copy the data back to sysmem buffer that belongs to command*/
-gk20a_mem_rd_n(g, &pcmd->surf.vidmem_desc, 0, pcmd->buf, pcmd->fbsize);
+nvgpu_mem_rd_n(g, &pcmd->surf.vidmem_desc, 0, pcmd->buf, pcmd->fbsize);
 
 boardobjgrp_pmugetstatus_exit:
 return status;
diff --git a/drivers/gpu/nvgpu/common/linux/mem_desc.c b/drivers/gpu/nvgpu/common/linux/mem_desc.c
index b2ef122e..02c3d1a9 100644
--- a/drivers/gpu/nvgpu/common/linux/mem_desc.c
+++ b/drivers/gpu/nvgpu/common/linux/mem_desc.c
@@ -20,7 +20,7 @@
 #include "gk20a/gk20a.h"
 #include "gk20a/mm_gk20a.h"
 
-u32 __gk20a_aperture_mask(struct gk20a *g, enum gk20a_aperture aperture,
+u32 __nvgpu_aperture_mask(struct gk20a *g, enum nvgpu_aperture aperture,
 u32 sysmem_mask, u32 vidmem_mask)
 {
 switch (aperture) {
@@ -36,14 +36,14 @@ u32 __gk20a_aperture_mask(struct gk20a *g, enum gk20a_aperture aperture,
 return 0;
 }
 
-u32 gk20a_aperture_mask(struct gk20a *g, struct mem_desc *mem,
+u32 nvgpu_aperture_mask(struct gk20a *g, struct mem_desc *mem,
 u32 sysmem_mask, u32 vidmem_mask)
 {
-return __gk20a_aperture_mask(g, mem->aperture,
+return __nvgpu_aperture_mask(g, mem->aperture,
 sysmem_mask, vidmem_mask);
 }
 
-int gk20a_mem_begin(struct gk20a *g, struct mem_desc *mem)
+int nvgpu_mem_begin(struct gk20a *g, struct mem_desc *mem)
 {
 void *cpu_va;
 
@@ -66,7 +66,7 @@ int gk20a_mem_begin(struct gk20a *g, struct mem_desc *mem)
 return 0;
 }
 
-void gk20a_mem_end(struct gk20a *g, struct mem_desc *mem)
+void nvgpu_mem_end(struct gk20a *g, struct mem_desc *mem)
 {
 if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
 return;
@@ -75,7 +75,7 @@ void gk20a_mem_end(struct gk20a *g, struct mem_desc *mem)
 mem->cpu_va = NULL;
 }
 
-u32 gk20a_mem_rd32(struct gk20a *g, struct mem_desc *mem, u32 w)
+u32 nvgpu_mem_rd32(struct gk20a *g, struct mem_desc *mem, u32 w)
 {
 u32 data = 0;
 
@@ -103,13 +103,13 @@ u32 gk20a_mem_rd32(struct gk20a *g, struct mem_desc *mem, u32 w)
 return data;
 }
 
-u32 gk20a_mem_rd(struct gk20a *g, struct mem_desc *mem, u32 offset)
+u32 nvgpu_mem_rd(struct gk20a *g, struct mem_desc *mem, u32 offset)
 {
 WARN_ON(offset & 3);
-return gk20a_mem_rd32(g, mem, offset / sizeof(u32));
+return nvgpu_mem_rd32(g, mem, offset / sizeof(u32));
 }
 
-void gk20a_mem_rd_n(struct gk20a *g, struct mem_desc *mem,
+void nvgpu_mem_rd_n(struct gk20a *g, struct mem_desc *mem,
 u32 offset, void *dest, u32 size)
 {
 WARN_ON(offset & 3);
@@ -135,7 +135,7 @@ void gk20a_mem_rd_n(struct gk20a *g, struct mem_desc *mem,
 }
 }
 
-void gk20a_mem_wr32(struct gk20a *g, struct mem_desc *mem, u32 w, u32 data)
+void nvgpu_mem_wr32(struct gk20a *g, struct mem_desc *mem, u32 w, u32 data)
 {
 if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
 u32 *ptr = mem->cpu_va;
@@ -158,13 +158,13 @@ void gk20a_mem_wr32(struct gk20a *g, struct mem_desc *mem, u32 w, u32 data)
 }
 }
 
-void gk20a_mem_wr(struct gk20a *g, struct mem_desc *mem, u32 offset, u32 data)
+void nvgpu_mem_wr(struct gk20a *g, struct mem_desc *mem, u32 offset, u32 data)
 {
 WARN_ON(offset & 3);
-gk20a_mem_wr32(g, mem, offset / sizeof(u32), data);
+nvgpu_mem_wr32(g, mem, offset / sizeof(u32), data);
 }
 
-void gk20a_mem_wr_n(struct gk20a *g, struct mem_desc *mem, u32 offset,
+void nvgpu_mem_wr_n(struct gk20a *g, struct mem_desc *mem, u32 offset,
 void *src, u32 size)
 {
 WARN_ON(offset & 3);
@@ -192,7 +192,7 @@ void gk20a_mem_wr_n(struct gk20a *g, struct mem_desc *mem, u32 offset,
 }
 }
 
-void gk20a_memset(struct gk20a *g, struct mem_desc *mem, u32 offset,
+void nvgpu_memset(struct gk20a *g, struct mem_desc *mem, u32 offset,
 u32 c, u32 size)
 {
 WARN_ON(offset & 3);
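Note: taken together, the renamed helpers in mem_desc.c form a small bracketed API — nvgpu_mem_begin()/nvgpu_mem_end() map and unmap the buffer (or fall back to PRAMIN access), and the rd/wr/memset calls operate on 32-bit words or byte ranges in between. A hedged usage sketch; the helper name and offsets are made up for illustration, the accessor signatures come from the file above:

/* Sketch only: example_update_desc() and its offsets are illustrative. */
static int example_update_desc(struct gk20a *g, struct mem_desc *desc)
{
	u32 val;

	if (nvgpu_mem_begin(g, desc))
		return -ENOMEM;

	val = nvgpu_mem_rd(g, desc, 0x10);	/* byte offset, 4-byte aligned */
	nvgpu_mem_wr(g, desc, 0x10, val | 0x1);
	nvgpu_memset(g, desc, 0x20, 0, 0x40);	/* zero 0x40 bytes at offset 0x20 */

	nvgpu_mem_end(g, desc);
	return 0;
}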
diff --git a/drivers/gpu/nvgpu/gk20a/bus_gk20a.c b/drivers/gpu/nvgpu/gk20a/bus_gk20a.c
index fda1f80e..d161a29c 100644
--- a/drivers/gpu/nvgpu/gk20a/bus_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/bus_gk20a.c
@@ -138,7 +138,7 @@ static int gk20a_bus_bar1_bind(struct gk20a *g, struct mem_desc *bar1_inst)
 gk20a_dbg_info("bar1 inst block ptr: 0x%08x", ptr_v);
 
 gk20a_writel(g, bus_bar1_block_r(),
-gk20a_aperture_mask(g, bar1_inst,
+nvgpu_aperture_mask(g, bar1_inst,
 bus_bar1_block_target_sys_mem_ncoh_f(),
 bus_bar1_block_target_vid_mem_f()) |
 bus_bar1_block_mode_virtual_f() |
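Note: the aperture helper renamed above picks one of two caller-supplied register field values depending on where the buffer lives (sysmem vs. vidmem). Its switch body falls outside the context lines shown in this diff, so the following is an inferred sketch rather than a copy of the implementation:

/* Inferred sketch of the selection __nvgpu_aperture_mask() performs. */
static u32 example_aperture_mask(enum nvgpu_aperture aperture,
				 u32 sysmem_mask, u32 vidmem_mask)
{
	switch (aperture) {
	case APERTURE_SYSMEM:
		return sysmem_mask;
	case APERTURE_VIDMEM:
		return vidmem_mask;
	default:	/* e.g. an invalid aperture: no target bits set */
		return 0;
	}
}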
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index b7306369..e13a903f 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -2124,7 +2124,7 @@ static void gk20a_submit_append_priv_cmdbuf(struct channel_gk20a *c,
 pbdma_gp_entry1_length_f(cmd->size)
 };
 
-gk20a_mem_wr_n(g, gpfifo_mem, c->gpfifo.put * sizeof(x),
+nvgpu_mem_wr_n(g, gpfifo_mem, c->gpfifo.put * sizeof(x),
 &x, sizeof(x));
 
 if (cmd->mem->aperture == APERTURE_SYSMEM)
@@ -2207,10 +2207,10 @@ static int gk20a_submit_append_gpfifo(struct channel_gk20a *c,
 int length1 = len - length0;
 void *src2 = (u8 *)cpu_src + length0;
 
-gk20a_mem_wr_n(c->g, gpfifo_mem, start, cpu_src, length0);
-gk20a_mem_wr_n(c->g, gpfifo_mem, 0, src2, length1);
+nvgpu_mem_wr_n(c->g, gpfifo_mem, start, cpu_src, length0);
+nvgpu_mem_wr_n(c->g, gpfifo_mem, 0, src2, length1);
 } else {
-gk20a_mem_wr_n(c->g, gpfifo_mem, start, cpu_src, len);
+nvgpu_mem_wr_n(c->g, gpfifo_mem, start, cpu_src, len);
 
 }
 
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
index 8baf60dd..d9dfb133 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
@@ -46,13 +46,13 @@ static void add_wait_cmd(struct gk20a *g, struct priv_cmd_entry *cmd, u32 off,
 {
 off = cmd->off + off;
 /* syncpoint_a */
-gk20a_mem_wr32(g, cmd->mem, off++, 0x2001001C);
+nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001C);
 /* payload */
-gk20a_mem_wr32(g, cmd->mem, off++, thresh);
+nvgpu_mem_wr32(g, cmd->mem, off++, thresh);
 /* syncpoint_b */
-gk20a_mem_wr32(g, cmd->mem, off++, 0x2001001D);
+nvgpu_mem_wr32(g, cmd->mem, off++, 0x2001001D);
 /* syncpt_id, switch_en, wait */
-gk20a_mem_wr32(g, cmd->mem, off++, (id << 8) | 0x10);
+nvgpu_mem_wr32(g, cmd->mem, off++, (id << 8) | 0x10);
 }
 
 static int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s,
@@ -151,7 +151,7 @@ static int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
 if (nvhost_syncpt_is_expired_ext(sp->host1x_pdev,
 wait_id, wait_value)) {
 /* each wait_cmd is 4 u32s */
-gk20a_memset(c->g, wait_cmd->mem,
+nvgpu_memset(c->g, wait_cmd->mem,
 (wait_cmd->off + i * 4) * sizeof(u32),
 0, 4 * sizeof(u32));
 } else
@@ -212,22 +212,22 @@ static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
 
 if (wfi_cmd) {
 /* wfi */
-gk20a_mem_wr32(c->g, incr_cmd->mem, off++, 0x2001001E);
+nvgpu_mem_wr32(c->g, incr_cmd->mem, off++, 0x2001001E);
 /* handle, ignored */
-gk20a_mem_wr32(c->g, incr_cmd->mem, off++, 0x00000000);
+nvgpu_mem_wr32(c->g, incr_cmd->mem, off++, 0x00000000);
 }
 /* syncpoint_a */
-gk20a_mem_wr32(c->g, incr_cmd->mem, off++, 0x2001001C);
+nvgpu_mem_wr32(c->g, incr_cmd->mem, off++, 0x2001001C);
 /* payload, ignored */
-gk20a_mem_wr32(c->g, incr_cmd->mem, off++, 0);
+nvgpu_mem_wr32(c->g, incr_cmd->mem, off++, 0);
 /* syncpoint_b */
-gk20a_mem_wr32(c->g, incr_cmd->mem, off++, 0x2001001D);
+nvgpu_mem_wr32(c->g, incr_cmd->mem, off++, 0x2001001D);
 /* syncpt_id, incr */
-gk20a_mem_wr32(c->g, incr_cmd->mem, off++, (sp->id << 8) | 0x1);
+nvgpu_mem_wr32(c->g, incr_cmd->mem, off++, (sp->id << 8) | 0x1);
 /* syncpoint_b */
-gk20a_mem_wr32(c->g, incr_cmd->mem, off++, 0x2001001D);
+nvgpu_mem_wr32(c->g, incr_cmd->mem, off++, 0x2001001D);
 /* syncpt_id, incr */
-gk20a_mem_wr32(c->g, incr_cmd->mem, off++, (sp->id << 8) | 0x1);
+nvgpu_mem_wr32(c->g, incr_cmd->mem, off++, (sp->id << 8) | 0x1);
 
 WARN_ON(off - incr_cmd->off != incr_cmd_size);
 
@@ -531,39 +531,39 @@ static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
 nvgpu_semaphore_incr(s);
 
 /* semaphore_a */
-gk20a_mem_wr32(g, cmd->mem, off++, 0x20010004);
+nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010004);
 /* offset_upper */
-gk20a_mem_wr32(g, cmd->mem, off++, (va >> 32) & 0xff);
+nvgpu_mem_wr32(g, cmd->mem, off++, (va >> 32) & 0xff);
 /* semaphore_b */
-gk20a_mem_wr32(g, cmd->mem, off++, 0x20010005);
+nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010005);
 /* offset */
-gk20a_mem_wr32(g, cmd->mem, off++, va & 0xffffffff);
+nvgpu_mem_wr32(g, cmd->mem, off++, va & 0xffffffff);
 
 if (acquire) {
 /* semaphore_c */
-gk20a_mem_wr32(g, cmd->mem, off++, 0x20010006);
+nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010006);
 /* payload */
-gk20a_mem_wr32(g, cmd->mem, off++,
+nvgpu_mem_wr32(g, cmd->mem, off++,
 nvgpu_semaphore_get_value(s));
 /* semaphore_d */
-gk20a_mem_wr32(g, cmd->mem, off++, 0x20010007);
+nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010007);
 /* operation: acq_geq, switch_en */
-gk20a_mem_wr32(g, cmd->mem, off++, 0x4 | (0x1 << 12));
+nvgpu_mem_wr32(g, cmd->mem, off++, 0x4 | (0x1 << 12));
 } else {
 /* semaphore_c */
-gk20a_mem_wr32(g, cmd->mem, off++, 0x20010006);
+nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010006);
 /* payload */
-gk20a_mem_wr32(g, cmd->mem, off++,
+nvgpu_mem_wr32(g, cmd->mem, off++,
 nvgpu_semaphore_get_value(s));
 /* semaphore_d */
-gk20a_mem_wr32(g, cmd->mem, off++, 0x20010007);
+nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010007);
 /* operation: release, wfi */
-gk20a_mem_wr32(g, cmd->mem, off++,
+nvgpu_mem_wr32(g, cmd->mem, off++,
 0x2 | ((wfi ? 0x0 : 0x1) << 20));
 /* non_stall_int */
-gk20a_mem_wr32(g, cmd->mem, off++, 0x20010008);
+nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010008);
 /* ignored */
-gk20a_mem_wr32(g, cmd->mem, off++, 0);
+nvgpu_mem_wr32(g, cmd->mem, off++, 0);
 }
 
 if (acquire)
diff --git a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
index 7e7c9cb8..5724be72 100644
--- a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
@@ -87,7 +87,7 @@ static void gk20a_debug_dump_all_channel_status_ramfc(struct gk20a *g,
 
 ch_state[chid]->pid = ch->pid;
 ch_state[chid]->refs = atomic_read(&ch->ref_count);
-gk20a_mem_rd_n(g, &ch->inst_block, 0,
+nvgpu_mem_rd_n(g, &ch->inst_block, 0,
 &ch_state[chid]->inst_block[0],
 ram_in_alloc_size_v());
 gk20a_channel_put(ch);
diff --git a/drivers/gpu/nvgpu/gk20a/fb_gk20a.c b/drivers/gpu/nvgpu/gk20a/fb_gk20a.c
index 44f0ac4c..2e0809ee 100644
--- a/drivers/gpu/nvgpu/gk20a/fb_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fb_gk20a.c
@@ -135,7 +135,7 @@ void gk20a_fb_tlb_invalidate(struct gk20a *g, struct mem_desc *pdb)
 
 gk20a_writel(g, fb_mmu_invalidate_pdb_r(),
 fb_mmu_invalidate_pdb_addr_f(addr_lo) |
-gk20a_aperture_mask(g, pdb,
+nvgpu_aperture_mask(g, pdb,
 fb_mmu_invalidate_pdb_aperture_sys_mem_f(),
 fb_mmu_invalidate_pdb_aperture_vid_mem_f()));
 
diff --git a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
index 4fa71797..b4e3bad1 100644
--- a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
@@ -636,11 +636,11 @@ static int gk20a_fecs_trace_bind_channel(struct gk20a *g,
 pa = gk20a_mm_inst_block_addr(g, &trace->trace_buf);
 if (!pa)
 return -ENOMEM;
-aperture = gk20a_aperture_mask(g, &trace->trace_buf,
+aperture = nvgpu_aperture_mask(g, &trace->trace_buf,
 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_target_sys_mem_noncoherent_f(),
 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_target_vid_mem_f());
 
-if (gk20a_mem_begin(g, mem))
+if (nvgpu_mem_begin(g, mem))
 return -ENOMEM;
 
 lo = u64_lo32(pa);
@@ -649,19 +649,19 @@ static int gk20a_fecs_trace_bind_channel(struct gk20a *g,
 gk20a_dbg(gpu_dbg_ctxsw, "addr_hi=%x addr_lo=%x count=%d", hi,
 lo, GK20A_FECS_TRACE_NUM_RECORDS);
 
-gk20a_mem_wr(g, mem,
+nvgpu_mem_wr(g, mem,
 ctxsw_prog_main_image_context_timestamp_buffer_ptr_o(),
 lo);
-gk20a_mem_wr(g, mem,
+nvgpu_mem_wr(g, mem,
 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_o(),
 ctxsw_prog_main_image_context_timestamp_buffer_ptr_v_f(hi) |
 aperture);
-gk20a_mem_wr(g, mem,
+nvgpu_mem_wr(g, mem,
 ctxsw_prog_main_image_context_timestamp_buffer_control_o(),
 ctxsw_prog_main_image_context_timestamp_buffer_control_num_records_f(
 GK20A_FECS_TRACE_NUM_RECORDS));
 
-gk20a_mem_end(g, mem);
+nvgpu_mem_end(g, mem);
 
 /* pid (process identifier) in user space, corresponds to tgid (thread
 * group id) in kernel space.
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index b2a6b1a0..b8b0c9b0 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -3141,7 +3141,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 if (count != 0) {
 gk20a_writel(g, fifo_runlist_base_r(),
 fifo_runlist_base_ptr_f(u64_lo32(runlist_iova >> 12)) |
-gk20a_aperture_mask(g, &runlist->mem[new_buf],
+nvgpu_aperture_mask(g, &runlist->mem[new_buf],
 fifo_runlist_base_target_sys_mem_ncoh_f(),
 fifo_runlist_base_target_vid_mem_f()));
 }
@@ -3901,7 +3901,7 @@ static void gk20a_fifo_channel_bind(struct channel_gk20a *c)
 
 gk20a_writel(g, ccsr_channel_inst_r(c->hw_chid),
 ccsr_channel_inst_ptr_f(inst_ptr) |
-gk20a_aperture_mask(g, &c->inst_block,
+nvgpu_aperture_mask(g, &c->inst_block,
 ccsr_channel_inst_target_sys_mem_ncoh_f(),
 ccsr_channel_inst_target_vid_mem_f()) |
 ccsr_channel_inst_bind_true_f());
@@ -3943,14 +3943,14 @@ static int gk20a_fifo_commit_userd(struct channel_gk20a *c)
 gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx",
 c->hw_chid, (u64)c->userd_iova);
 
-gk20a_mem_wr32(g, &c->inst_block,
+nvgpu_mem_wr32(g, &c->inst_block,
 ram_in_ramfc_w() + ram_fc_userd_w(),
-gk20a_aperture_mask(g, &g->fifo.userd,
+nvgpu_aperture_mask(g, &g->fifo.userd,
 pbdma_userd_target_sys_mem_ncoh_f(),
 pbdma_userd_target_vid_mem_f()) |
 pbdma_userd_addr_f(addr_lo));
 
-gk20a_mem_wr32(g, &c->inst_block,
+nvgpu_mem_wr32(g, &c->inst_block,
 ram_in_ramfc_w() + ram_fc_userd_hi_w(),
 pbdma_userd_hi_addr_f(addr_hi));
 
@@ -3967,25 +3967,25 @@ int gk20a_fifo_setup_ramfc(struct channel_gk20a *c,
 
 gk20a_dbg_fn("");
 
-gk20a_memset(g, mem, 0, 0, ram_fc_size_val_v());
+nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v());
 
-gk20a_mem_wr32(g, mem, ram_fc_gp_base_w(),
+nvgpu_mem_wr32(g, mem, ram_fc_gp_base_w(),
 pbdma_gp_base_offset_f(
 u64_lo32(gpfifo_base >> pbdma_gp_base_rsvd_s())));
 
-gk20a_mem_wr32(g, mem, ram_fc_gp_base_hi_w(),
+nvgpu_mem_wr32(g, mem, ram_fc_gp_base_hi_w(),
 pbdma_gp_base_hi_offset_f(u64_hi32(gpfifo_base)) |
 pbdma_gp_base_hi_limit2_f(ilog2(gpfifo_entries)));
 
-gk20a_mem_wr32(g, mem, ram_fc_signature_w(),
+nvgpu_mem_wr32(g, mem, ram_fc_signature_w(),
 c->g->ops.fifo.get_pbdma_signature(c->g));
 
-gk20a_mem_wr32(g, mem, ram_fc_formats_w(),
+nvgpu_mem_wr32(g, mem, ram_fc_formats_w(),
 pbdma_formats_gp_fermi0_f() |
 pbdma_formats_pb_fermi1_f() |
 pbdma_formats_mp_fermi0_f());
 
-gk20a_mem_wr32(g, mem, ram_fc_pb_header_w(),
+nvgpu_mem_wr32(g, mem, ram_fc_pb_header_w(),
 pbdma_pb_header_priv_user_f() |
 pbdma_pb_header_method_zero_f() |
 pbdma_pb_header_subchannel_zero_f() |
@@ -3993,27 +3993,27 @@ int gk20a_fifo_setup_ramfc(struct channel_gk20a *c,
 pbdma_pb_header_first_true_f() |
 pbdma_pb_header_type_inc_f());
 
-gk20a_mem_wr32(g, mem, ram_fc_subdevice_w(),
+nvgpu_mem_wr32(g, mem, ram_fc_subdevice_w(),
 pbdma_subdevice_id_f(1) |
 pbdma_subdevice_status_active_f() |
 pbdma_subdevice_channel_dma_enable_f());
 
-gk20a_mem_wr32(g, mem, ram_fc_target_w(), pbdma_target_engine_sw_f());
+nvgpu_mem_wr32(g, mem, ram_fc_target_w(), pbdma_target_engine_sw_f());
 
-gk20a_mem_wr32(g, mem, ram_fc_acquire_w(),
+nvgpu_mem_wr32(g, mem, ram_fc_acquire_w(),
 g->ops.fifo.pbdma_acquire_val(timeout));
 
-gk20a_mem_wr32(g, mem, ram_fc_runlist_timeslice_w(),
+nvgpu_mem_wr32(g, mem, ram_fc_runlist_timeslice_w(),
 fifo_runlist_timeslice_timeout_128_f() |
 fifo_runlist_timeslice_timescale_3_f() |
 fifo_runlist_timeslice_enable_true_f());
 
-gk20a_mem_wr32(g, mem, ram_fc_pb_timeslice_w(),
+nvgpu_mem_wr32(g, mem, ram_fc_pb_timeslice_w(),
 fifo_pb_timeslice_timeout_16_f() |
 fifo_pb_timeslice_timescale_0_f() |
 fifo_pb_timeslice_enable_true_f());
 
-gk20a_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));
+nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));
 
 if (c->is_privileged_channel)
 gk20a_fifo_setup_ramfc_for_privileged_channel(c);
@@ -4035,7 +4035,7 @@ static int channel_gk20a_set_schedule_params(struct channel_gk20a *c)
 WARN_ON(c->g->ops.fifo.preempt_channel(c->g, c->hw_chid));
 
 /* set new timeslice */
-gk20a_mem_wr32(c->g, &c->inst_block, ram_fc_runlist_timeslice_w(),
+nvgpu_mem_wr32(c->g, &c->inst_block, ram_fc_runlist_timeslice_w(),
 value | (shift << 12) |
 fifo_runlist_timeslice_enable_true_f());
 
@@ -4102,7 +4102,7 @@ void gk20a_fifo_setup_ramfc_for_privileged_channel(struct channel_gk20a *c)
 gk20a_dbg_info("channel %d : set ramfc privileged_channel", c->hw_chid);
 
 /* Enable HCE priv mode for phys mode transfer */
-gk20a_mem_wr32(g, mem, ram_fc_hce_ctrl_w(),
+nvgpu_mem_wr32(g, mem, ram_fc_hce_ctrl_w(),
 pbdma_hce_ctrl_hce_priv_mode_yes_f());
 }
 
@@ -4114,16 +4114,16 @@ int gk20a_fifo_setup_userd(struct channel_gk20a *c)
 
 gk20a_dbg_fn("");
 
-gk20a_mem_wr32(g, mem, offset + ram_userd_put_w(), 0);
-gk20a_mem_wr32(g, mem, offset + ram_userd_get_w(), 0);
-gk20a_mem_wr32(g, mem, offset + ram_userd_ref_w(), 0);
-gk20a_mem_wr32(g, mem, offset + ram_userd_put_hi_w(), 0);
-gk20a_mem_wr32(g, mem, offset + ram_userd_ref_threshold_w(), 0);
-gk20a_mem_wr32(g, mem, offset + ram_userd_gp_top_level_get_w(), 0);
-gk20a_mem_wr32(g, mem, offset + ram_userd_gp_top_level_get_hi_w(), 0);
-gk20a_mem_wr32(g, mem, offset + ram_userd_get_hi_w(), 0);
-gk20a_mem_wr32(g, mem, offset + ram_userd_gp_get_w(), 0);
-gk20a_mem_wr32(g, mem, offset + ram_userd_gp_put_w(), 0);
+nvgpu_mem_wr32(g, mem, offset + ram_userd_put_w(), 0);
+nvgpu_mem_wr32(g, mem, offset + ram_userd_get_w(), 0);
+nvgpu_mem_wr32(g, mem, offset + ram_userd_ref_w(), 0);
+nvgpu_mem_wr32(g, mem, offset + ram_userd_put_hi_w(), 0);
+nvgpu_mem_wr32(g, mem, offset + ram_userd_ref_threshold_w(), 0);
+nvgpu_mem_wr32(g, mem, offset + ram_userd_gp_top_level_get_w(), 0);
+nvgpu_mem_wr32(g, mem, offset + ram_userd_gp_top_level_get_hi_w(), 0);
+nvgpu_mem_wr32(g, mem, offset + ram_userd_get_hi_w(), 0);
+nvgpu_mem_wr32(g, mem, offset + ram_userd_gp_get_w(), 0);
+nvgpu_mem_wr32(g, mem, offset + ram_userd_gp_put_w(), 0);
 
 return 0;
 }
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 2a9f8a06..db7b3c5d 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -617,7 +617,7 @@ struct gpu_ops {
 bool sparse,
 bool priv,
 struct vm_gk20a_mapping_batch *batch,
-enum gk20a_aperture aperture);
+enum nvgpu_aperture aperture);
 void (*gmmu_unmap)(struct vm_gk20a *vm,
 u64 vaddr,
 u64 size,
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 3e9a388b..360b8c97 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -110,13 +110,13 @@ int gr_gk20a_get_ctx_id(struct gk20a *g,
 Flush and invalidate before cpu update. */
 g->ops.mm.l2_flush(g, true);
 
-if (gk20a_mem_begin(g, &ch_ctx->gr_ctx->mem))
+if (nvgpu_mem_begin(g, &ch_ctx->gr_ctx->mem))
 return -ENOMEM;
 
-*ctx_id = gk20a_mem_rd(g, &ch_ctx->gr_ctx->mem,
+*ctx_id = nvgpu_mem_rd(g, &ch_ctx->gr_ctx->mem,
 ctxsw_prog_main_image_context_id_o());
 
-gk20a_mem_end(g, &ch_ctx->gr_ctx->mem);
+nvgpu_mem_end(g, &ch_ctx->gr_ctx->mem);
 
 return 0;
 }
@@ -649,11 +649,11 @@ int gr_gk20a_commit_inst(struct channel_gk20a *c, u64 gpu_va)
 addr_lo = u64_lo32(gpu_va) >> 12;
 addr_hi = u64_hi32(gpu_va);
 
-gk20a_mem_wr32(c->g, &c->inst_block, ram_in_gr_wfi_target_w(),
+nvgpu_mem_wr32(c->g, &c->inst_block, ram_in_gr_wfi_target_w(),
 ram_in_gr_cs_wfi_f() | ram_in_gr_wfi_mode_virtual_f() |
 ram_in_gr_wfi_ptr_lo_f(addr_lo));
 
-gk20a_mem_wr32(c->g, &c->inst_block, ram_in_gr_wfi_ptr_hi_w(),
+nvgpu_mem_wr32(c->g, &c->inst_block, ram_in_gr_wfi_ptr_hi_w(),
 ram_in_gr_wfi_ptr_hi_f(addr_hi));
 
 return 0;
@@ -670,16 +670,16 @@ int gr_gk20a_commit_inst(struct channel_gk20a *c, u64 gpu_va)
 int gr_gk20a_ctx_patch_write_begin(struct gk20a *g,
 struct channel_ctx_gk20a *ch_ctx)
 {
-return gk20a_mem_begin(g, &ch_ctx->patch_ctx.mem);
+return nvgpu_mem_begin(g, &ch_ctx->patch_ctx.mem);
 }
 
 void gr_gk20a_ctx_patch_write_end(struct gk20a *g,
 struct channel_ctx_gk20a *ch_ctx)
 {
-gk20a_mem_end(g, &ch_ctx->patch_ctx.mem);
+nvgpu_mem_end(g, &ch_ctx->patch_ctx.mem);
 /* Write context count to context image if it is mapped */
 if (ch_ctx->gr_ctx->mem.cpu_va) {
-gk20a_mem_wr(g, &ch_ctx->gr_ctx->mem,
+nvgpu_mem_wr(g, &ch_ctx->gr_ctx->mem,
 ctxsw_prog_main_image_patch_count_o(),
 ch_ctx->patch_ctx.data_count);
 }
@@ -691,8 +691,8 @@ void gr_gk20a_ctx_patch_write(struct gk20a *g,
 {
 if (patch) {
 u32 patch_slot = ch_ctx->patch_ctx.data_count * 2;
-gk20a_mem_wr32(g, &ch_ctx->patch_ctx.mem, patch_slot, addr);
-gk20a_mem_wr32(g, &ch_ctx->patch_ctx.mem, patch_slot + 1, data);
+nvgpu_mem_wr32(g, &ch_ctx->patch_ctx.mem, patch_slot, addr);
+nvgpu_mem_wr32(g, &ch_ctx->patch_ctx.mem, patch_slot + 1, data);
 ch_ctx->patch_ctx.data_count++;
 } else {
 gk20a_writel(g, addr, data);
@@ -703,7 +703,7 @@ static u32 fecs_current_ctx_data(struct gk20a *g, struct mem_desc *inst_block)
 {
 u32 ptr = u64_lo32(gk20a_mm_inst_block_addr(g, inst_block)
 >> ram_in_base_shift_v());
-u32 aperture = gk20a_aperture_mask(g, inst_block,
+u32 aperture = nvgpu_aperture_mask(g, inst_block,
 gr_fecs_current_ctx_target_sys_mem_ncoh_f(),
 gr_fecs_current_ctx_target_vid_mem_f());
 
@@ -745,7 +745,7 @@ void gr_gk20a_write_zcull_ptr(struct gk20a *g,
 {
 u32 va = u64_lo32(gpu_va >> 8);
 
-gk20a_mem_wr(g, mem,
+nvgpu_mem_wr(g, mem,
 ctxsw_prog_main_image_zcull_ptr_o(), va);
 }
 
@@ -754,7 +754,7 @@ void gr_gk20a_write_pm_ptr(struct gk20a *g,
 {
 u32 va = u64_lo32(gpu_va >> 8);
 
-gk20a_mem_wr(g, mem,
+nvgpu_mem_wr(g, mem,
 ctxsw_prog_main_image_pm_ptr_o(), va);
 }
 
@@ -768,10 +768,10 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c)
 
 gk20a_dbg_fn("");
 
-if (gk20a_mem_begin(g, mem))
+if (nvgpu_mem_begin(g, mem))
 return -ENOMEM;
 
-if (gk20a_mem_begin(g, ctxheader)) {
+if (nvgpu_mem_begin(g, ctxheader)) {
 ret = -ENOMEM;
 goto clean_up_mem;
 }
@@ -795,7 +795,7 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c)
 goto clean_up;
 }
 
-gk20a_mem_wr(g, mem,
+nvgpu_mem_wr(g, mem,
 ctxsw_prog_main_image_zcull_o(),
 ch_ctx->zcull_ctx.ctx_sw_mode);
 
@@ -808,9 +808,9 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c)
 gk20a_enable_channel_tsg(g, c);
 
 clean_up:
-gk20a_mem_end(g, ctxheader);
+nvgpu_mem_end(g, ctxheader);
 clean_up_mem:
-gk20a_mem_end(g, mem);
+nvgpu_mem_end(g, mem);
 
 return ret;
 }
@@ -1756,10 +1756,10 @@ restore_fe_go_idle:
 goto restore_fe_go_idle;
 }
 
-if (gk20a_mem_begin(g, gold_mem))
+if (nvgpu_mem_begin(g, gold_mem))
 goto clean_up;
 
-if (gk20a_mem_begin(g, gr_mem))
+if (nvgpu_mem_begin(g, gr_mem))
 goto clean_up;
 
 ctx_header_words = roundup(ctx_header_bytes, sizeof(u32));
@@ -1768,26 +1768,26 @@ restore_fe_go_idle:
 g->ops.mm.l2_flush(g, true);
 
 for (i = 0; i < ctx_header_words; i++) {
-data = gk20a_mem_rd32(g, gr_mem, i);
-gk20a_mem_wr32(g, gold_mem, i, data);
+data = nvgpu_mem_rd32(g, gr_mem, i);
+nvgpu_mem_wr32(g, gold_mem, i, data);
 }
-gk20a_mem_wr(g, gold_mem, ctxsw_prog_main_image_zcull_o(),
+nvgpu_mem_wr(g, gold_mem, ctxsw_prog_main_image_zcull_o(),
 ctxsw_prog_main_image_zcull_mode_no_ctxsw_v());
 
-if (gk20a_mem_begin(g, ctxheader))
+if (nvgpu_mem_begin(g, ctxheader))
 goto clean_up;
 
 if (ctxheader->gpu_va)
 g->ops.gr.write_zcull_ptr(g, ctxheader, 0);
 else
 g->ops.gr.write_zcull_ptr(g, gold_mem, 0);
-gk20a_mem_end(g, ctxheader);
+nvgpu_mem_end(g, ctxheader);
 
 g->ops.gr.commit_inst(c, ch_ctx->global_ctx_buffer_va[GOLDEN_CTX_VA]);
 
 gr_gk20a_fecs_ctx_image_save(c, gr_fecs_method_push_adr_wfi_golden_save_v());
 
-if (gk20a_mem_begin(g, ctxheader))
+if (nvgpu_mem_begin(g, ctxheader))
 goto clean_up;
 
 if (gr->ctx_vars.local_golden_image == NULL) {
@@ -1801,15 +1801,15 @@ restore_fe_go_idle:
 }
 
 if (ctxheader->gpu_va)
-gk20a_mem_rd_n(g, ctxheader, 0,
+nvgpu_mem_rd_n(g, ctxheader, 0,
 gr->ctx_vars.local_golden_image,
 gr->ctx_vars.golden_image_size);
 else
-gk20a_mem_rd_n(g, gold_mem, 0,
+nvgpu_mem_rd_n(g, gold_mem, 0,
 gr->ctx_vars.local_golden_image,
 gr->ctx_vars.golden_image_size);
 }
-gk20a_mem_end(g, ctxheader);
+nvgpu_mem_end(g, ctxheader);
 
 g->ops.gr.commit_inst(c, gr_mem->gpu_va);
 
@@ -1824,8 +1824,8 @@ clean_up:
 else
 gk20a_dbg_fn("done");
 
-gk20a_mem_end(g, gold_mem);
-gk20a_mem_end(g, gr_mem);
+nvgpu_mem_end(g, gold_mem);
+nvgpu_mem_end(g, gr_mem);
 
 nvgpu_mutex_release(&gr->ctx_mutex);
 return err;
@@ -1865,22 +1865,22 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
 Flush and invalidate before cpu update. */
 g->ops.mm.l2_flush(g, true);
 
-if (gk20a_mem_begin(g, mem)) {
+if (nvgpu_mem_begin(g, mem)) {
 ret = -ENOMEM;
 goto out;
 }
 
-data = gk20a_mem_rd(g, mem,
+data = nvgpu_mem_rd(g, mem,
 ctxsw_prog_main_image_pm_o());
 data = data & ~ctxsw_prog_main_image_pm_smpc_mode_m();
 data |= enable_smpc_ctxsw ?
 ctxsw_prog_main_image_pm_smpc_mode_ctxsw_f() :
 ctxsw_prog_main_image_pm_smpc_mode_no_ctxsw_f();
-gk20a_mem_wr(g, mem,
+nvgpu_mem_wr(g, mem,
 ctxsw_prog_main_image_pm_o(),
 data);
 
-gk20a_mem_end(g, mem);
+nvgpu_mem_end(g, mem);
 
 out:
 gk20a_enable_channel_tsg(g, c);
@@ -1964,27 +1964,27 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 }
 
 /* Now clear the buffer */
-if (gk20a_mem_begin(g, &pm_ctx->mem)) {
+if (nvgpu_mem_begin(g, &pm_ctx->mem)) {
 ret = -ENOMEM;
 goto cleanup_pm_buf;
 }
 
-gk20a_memset(g, &pm_ctx->mem, 0, 0, pm_ctx->mem.size);
+nvgpu_memset(g, &pm_ctx->mem, 0, 0, pm_ctx->mem.size);
 
-gk20a_mem_end(g, &pm_ctx->mem);
+nvgpu_mem_end(g, &pm_ctx->mem);
 }
 
-if (gk20a_mem_begin(g, gr_mem)) {
+if (nvgpu_mem_begin(g, gr_mem)) {
 ret = -ENOMEM;
 goto cleanup_pm_buf;
 }
 
-if (gk20a_mem_begin(g, ctxheader)) {
+if (nvgpu_mem_begin(g, ctxheader)) {
 ret = -ENOMEM;
 goto clean_up_mem;
 }
 
-data = gk20a_mem_rd(g, gr_mem, ctxsw_prog_main_image_pm_o());
+data = nvgpu_mem_rd(g, gr_mem, ctxsw_prog_main_image_pm_o());
 data = data & ~ctxsw_prog_main_image_pm_mode_m();
 
 if (enable_hwpm_ctxsw) {
@@ -1998,22 +1998,22 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 
 data |= pm_ctx->pm_mode;
 
-gk20a_mem_wr(g, gr_mem, ctxsw_prog_main_image_pm_o(), data);
+nvgpu_mem_wr(g, gr_mem, ctxsw_prog_main_image_pm_o(), data);
 
 if (ctxheader->gpu_va)
 g->ops.gr.write_pm_ptr(g, ctxheader, virt_addr);
 else
 g->ops.gr.write_pm_ptr(g, gr_mem, virt_addr);
 
-gk20a_mem_end(g, ctxheader);
-gk20a_mem_end(g, gr_mem);
+nvgpu_mem_end(g, ctxheader);
+nvgpu_mem_end(g, gr_mem);
 
 /* enable channel */
 gk20a_enable_channel_tsg(g, c);
 
 return 0;
 clean_up_mem:
-gk20a_mem_end(g, gr_mem);
+nvgpu_mem_end(g, gr_mem);
 cleanup_pm_buf:
 gk20a_gmmu_unmap(c->vm, pm_ctx->mem.gpu_va, pm_ctx->mem.size,
 gk20a_mem_flag_none);
@@ -2048,10 +2048,10 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 Flush and invalidate before cpu update. */
 g->ops.mm.l2_flush(g, true);
 
-if (gk20a_mem_begin(g, mem))
+if (nvgpu_mem_begin(g, mem))
 return -ENOMEM;
 
-if (gk20a_mem_begin(g, ctxheader)) {
+if (nvgpu_mem_begin(g, ctxheader)) {
 ret = -ENOMEM;
 goto clean_up_mem;
 }
@@ -2060,12 +2060,12 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 if (g->ops.gr.restore_context_header)
 g->ops.gr.restore_context_header(g, ctxheader);
 } else {
-gk20a_mem_wr_n(g, mem, 0,
+nvgpu_mem_wr_n(g, mem, 0,
 gr->ctx_vars.local_golden_image,
 gr->ctx_vars.golden_image_size);
-gk20a_mem_wr(g, mem,
+nvgpu_mem_wr(g, mem,
 ctxsw_prog_main_image_num_save_ops_o(), 0);
-gk20a_mem_wr(g, mem,
+nvgpu_mem_wr(g, mem,
 ctxsw_prog_main_image_num_restore_ops_o(), 0);
 }
 
@@ -2083,29 +2083,29 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 else
 data = ctxsw_prog_main_image_priv_access_map_config_mode_use_map_f();
 
-gk20a_mem_wr(g, mem, ctxsw_prog_main_image_priv_access_map_config_o(),
+nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_priv_access_map_config_o(),
 data);
 
 if (ctxheader->gpu_va) {
-gk20a_mem_wr(g, ctxheader,
+nvgpu_mem_wr(g, ctxheader,
 ctxsw_prog_main_image_priv_access_map_addr_lo_o(),
 virt_addr_lo);
-gk20a_mem_wr(g, ctxheader,
+nvgpu_mem_wr(g, ctxheader,
 ctxsw_prog_main_image_priv_access_map_addr_hi_o(),
 virt_addr_hi);
 } else {
-gk20a_mem_wr(g, mem,
+nvgpu_mem_wr(g, mem,
 ctxsw_prog_main_image_priv_access_map_addr_lo_o(),
 virt_addr_lo);
-gk20a_mem_wr(g, mem,
+nvgpu_mem_wr(g, mem,
 ctxsw_prog_main_image_priv_access_map_addr_hi_o(),
 virt_addr_hi);
 }
 /* disable verif features */
-v = gk20a_mem_rd(g, mem, ctxsw_prog_main_image_misc_options_o());
+v = nvgpu_mem_rd(g, mem, ctxsw_prog_main_image_misc_options_o());
 v = v & ~(ctxsw_prog_main_image_misc_options_verif_features_m());
 v = v | ctxsw_prog_main_image_misc_options_verif_features_disabled_f();
-gk20a_mem_wr(g, mem, ctxsw_prog_main_image_misc_options_o(), v);
+nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_misc_options_o(), v);
 
 if (g->ops.gr.update_ctxsw_preemption_mode)
 g->ops.gr.update_ctxsw_preemption_mode(g, ch_ctx, mem);
@@ -2116,26 +2116,26 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 virt_addr_lo = u64_lo32(ch_ctx->patch_ctx.mem.gpu_va);
 virt_addr_hi = u64_hi32(ch_ctx->patch_ctx.mem.gpu_va);
 
-gk20a_mem_wr(g, mem, ctxsw_prog_main_image_patch_count_o(),
+nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_patch_count_o(),
 ch_ctx->patch_ctx.data_count);
-gk20a_mem_wr(g, mem, ctxsw_prog_main_image_patch_adr_lo_o(),
+nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_patch_adr_lo_o(),
 virt_addr_lo);
-gk20a_mem_wr(g, mem, ctxsw_prog_main_image_patch_adr_hi_o(),
+nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_patch_adr_hi_o(),
 virt_addr_hi);
 
 if (ctxheader->gpu_va) {
-gk20a_mem_wr(g, ctxheader,
+nvgpu_mem_wr(g, ctxheader,
 ctxsw_prog_main_image_patch_count_o(),
 ch_ctx->patch_ctx.data_count);
-gk20a_mem_wr(g, ctxheader,
+nvgpu_mem_wr(g, ctxheader,
 ctxsw_prog_main_image_patch_adr_lo_o(),
 virt_addr_lo);
-gk20a_mem_wr(g, ctxheader,
+nvgpu_mem_wr(g, ctxheader,
 ctxsw_prog_main_image_patch_adr_hi_o(),
 virt_addr_hi);
 }
 
-gk20a_mem_wr(g, mem, ctxsw_prog_main_image_zcull_o(),
+nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_zcull_o(),
 ch_ctx->zcull_ctx.ctx_sw_mode);
 
 if (ctxheader->gpu_va)
@@ -2153,7 +2153,7 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 if (ch_ctx->pm_ctx.mem.gpu_va == 0) {
 gk20a_err(dev_from_gk20a(g),
 "context switched pm with no pm buffer!");
-gk20a_mem_end(g, mem);
+nvgpu_mem_end(g, mem);
 return -EFAULT;
 }
 
@@ -2161,11 +2161,11 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 } else
 virt_addr = 0;
 
-data = gk20a_mem_rd(g, mem, ctxsw_prog_main_image_pm_o());
+data = nvgpu_mem_rd(g, mem, ctxsw_prog_main_image_pm_o());
 data = data & ~ctxsw_prog_main_image_pm_mode_m();
 data |= ch_ctx->pm_ctx.pm_mode;
 
-gk20a_mem_wr(g, mem, ctxsw_prog_main_image_pm_o(), data);
+nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_pm_o(), data);
 
 if (ctxheader->gpu_va)
 g->ops.gr.write_pm_ptr(g, ctxheader, virt_addr);
@@ -2173,9 +2173,9 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 g->ops.gr.write_pm_ptr(g, mem, virt_addr);
 
 
-gk20a_mem_end(g, ctxheader);
+nvgpu_mem_end(g, ctxheader);
 clean_up_mem:
-gk20a_mem_end(g, mem);
+nvgpu_mem_end(g, mem);
 
 return ret;
 }
@@ -2256,11 +2256,11 @@ static int gr_gk20a_copy_ctxsw_ucode_segments(
 {
 unsigned int i;
 
-gk20a_mem_wr_n(g, dst, segments->boot.offset, bootimage,
+nvgpu_mem_wr_n(g, dst, segments->boot.offset, bootimage,
 segments->boot.size);
-gk20a_mem_wr_n(g, dst, segments->code.offset, code,
+nvgpu_mem_wr_n(g, dst, segments->code.offset, code,
 segments->code.size);
-gk20a_mem_wr_n(g, dst, segments->data.offset, data,
+nvgpu_mem_wr_n(g, dst, segments->data.offset, data,
 segments->data.size);
 
 /* compute a "checksum" for the boot binary to detect its version */
@@ -2382,14 +2382,14 @@ void gr_gk20a_load_falcon_bind_instblk(struct gk20a *g)
 inst_ptr = gk20a_mm_inst_block_addr(g, &ucode_info->inst_blk_desc);
 gk20a_writel(g, gr_fecs_new_ctx_r(),
 gr_fecs_new_ctx_ptr_f(inst_ptr >> 12) |
-gk20a_aperture_mask(g, &ucode_info->inst_blk_desc,
+nvgpu_aperture_mask(g, &ucode_info->inst_blk_desc,
 gr_fecs_new_ctx_target_sys_mem_ncoh_f(),
 gr_fecs_new_ctx_target_vid_mem_f()) |
 gr_fecs_new_ctx_valid_m());
 
 gk20a_writel(g, gr_fecs_arb_ctx_ptr_r(),
 gr_fecs_arb_ctx_ptr_ptr_f(inst_ptr >> 12) |
-gk20a_aperture_mask(g, &ucode_info->inst_blk_desc,
+nvgpu_aperture_mask(g, &ucode_info->inst_blk_desc,
 gr_fecs_arb_ctx_ptr_target_sys_mem_ncoh_f(),
 gr_fecs_arb_ctx_ptr_target_vid_mem_f()));
 
@@ -4748,7 +4748,7 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
 addr >>= fb_mmu_debug_wr_addr_alignment_v();
 
 gk20a_writel(g, fb_mmu_debug_wr_r(),
-gk20a_aperture_mask(g, &gr->mmu_wr_mem,
+nvgpu_aperture_mask(g, &gr->mmu_wr_mem,
 fb_mmu_debug_wr_aperture_sys_mem_ncoh_f(),
 fb_mmu_debug_wr_aperture_vid_mem_f()) |
 fb_mmu_debug_wr_vol_false_f() |
@@ -4758,7 +4758,7 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
 addr >>= fb_mmu_debug_rd_addr_alignment_v();
 
 gk20a_writel(g, fb_mmu_debug_rd_r(),
-gk20a_aperture_mask(g, &gr->mmu_rd_mem,
+nvgpu_aperture_mask(g, &gr->mmu_rd_mem,
 fb_mmu_debug_wr_aperture_sys_mem_ncoh_f(),
 fb_mmu_debug_rd_aperture_vid_mem_f()) |
 fb_mmu_debug_rd_vol_false_f() |
@@ -5092,13 +5092,13 @@ static int gr_gk20a_init_access_map(struct gk20a *g)
 u32 *whitelist = NULL;
 unsigned int num_entries = 0;
 
-if (gk20a_mem_begin(g, mem)) {
+if (nvgpu_mem_begin(g, mem)) {
 gk20a_err(dev_from_gk20a(g),
 "failed to map priv access map memory");
 return -ENOMEM;
 }
 
-gk20a_memset(g, mem, 0, 0, PAGE_SIZE * nr_pages);
+nvgpu_memset(g, mem, 0, 0, PAGE_SIZE * nr_pages);
 
 g->ops.gr.get_access_map(g, &whitelist, &num_entries);
 
@@ -5109,14 +5109,14 @@ static int gr_gk20a_init_access_map(struct gk20a *g)
 map_shift = map_bit & 0x7; /* i.e. 0-7 */
 gk20a_dbg_info("access map addr:0x%x byte:0x%x bit:%d",
 whitelist[w], map_byte, map_shift);
-x = gk20a_mem_rd32(g, mem, map_byte / sizeof(u32));
+x = nvgpu_mem_rd32(g, mem, map_byte / sizeof(u32));
 x |= 1 << (
 (map_byte % sizeof(u32) * BITS_PER_BYTE)
 + map_shift);
-gk20a_mem_wr32(g, mem, map_byte / sizeof(u32), x);
+nvgpu_mem_wr32(g, mem, map_byte / sizeof(u32), x);
 }
 
-gk20a_mem_end(g, mem);
+nvgpu_mem_end(g, mem);
 return 0;
 }
 
@@ -7160,7 +7160,7 @@ static int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
 /* reset the patch count from previous
 runs,if ucode has already processed
 it */
-tmp = gk20a_mem_rd(g, mem,
+tmp = nvgpu_mem_rd(g, mem,
 ctxsw_prog_main_image_patch_count_o());
 
 if (!tmp)
@@ -7172,13 +7172,13 @@ static int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
 vaddr_lo = u64_lo32(ch_ctx->patch_ctx.mem.gpu_va);
 vaddr_hi = u64_hi32(ch_ctx->patch_ctx.mem.gpu_va);
 
-gk20a_mem_wr(g, mem,
+nvgpu_mem_wr(g, mem,
 ctxsw_prog_main_image_patch_count_o(),
 ch_ctx->patch_ctx.data_count);
-gk20a_mem_wr(g, mem,
+nvgpu_mem_wr(g, mem,
 ctxsw_prog_main_image_patch_adr_lo_o(),
 vaddr_lo);
-gk20a_mem_wr(g, mem,
+nvgpu_mem_wr(g, mem,
 ctxsw_prog_main_image_patch_adr_hi_o(),
 vaddr_hi);
 
@@ -8393,7 +8393,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 * gr_gk20a_apply_instmem_overrides,
 * recoded in-place instead.
 */
-if (gk20a_mem_begin(g, &ch_ctx->gr_ctx->mem)) {
+if (nvgpu_mem_begin(g, &ch_ctx->gr_ctx->mem)) {
 err = -ENOMEM;
 goto cleanup;
 }
@@ -8422,7 +8422,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 err = -EINVAL;
 goto cleanup;
 }
-if (gk20a_mem_begin(g, &ch_ctx->pm_ctx.mem)) {
+if (nvgpu_mem_begin(g, &ch_ctx->pm_ctx.mem)) {
 err = -ENOMEM;
 goto cleanup;
 }
@@ -8445,20 +8445,20 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 (offsets[j] >= g->gr.ctx_vars.golden_image_size))
 continue;
 if (pass == 0) { /* write pass */
-v = gk20a_mem_rd(g, current_mem, offsets[j]);
+v = nvgpu_mem_rd(g, current_mem, offsets[j]);
 v &= ~ctx_ops[i].and_n_mask_lo;
 v |= ctx_ops[i].value_lo;
-gk20a_mem_wr(g, current_mem, offsets[j], v);
+nvgpu_mem_wr(g, current_mem, offsets[j], v);
 
 gk20a_dbg(gpu_dbg_gpu_dbg,
 "context wr: offset=0x%x v=0x%x",
 offsets[j], v);
 
 if (ctx_ops[i].op == REGOP(WRITE_64)) {
-v = gk20a_mem_rd(g, current_mem, offsets[j] + 4);
+v = nvgpu_mem_rd(g, current_mem, offsets[j] + 4);
 v &= ~ctx_ops[i].and_n_mask_hi;
 v |= ctx_ops[i].value_hi;
-gk20a_mem_wr(g, current_mem, offsets[j] + 4, v);
+nvgpu_mem_wr(g, current_mem, offsets[j] + 4, v);
 
 gk20a_dbg(gpu_dbg_gpu_dbg,
 "context wr: offset=0x%x v=0x%x",
@@ -8472,14 +8472,14 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 
 } else { /* read pass */
 ctx_ops[i].value_lo =
-gk20a_mem_rd(g, current_mem, offsets[0]);
+nvgpu_mem_rd(g, current_mem, offsets[0]);
 
 gk20a_dbg(gpu_dbg_gpu_dbg, "context rd: offset=0x%x v=0x%x",
 offsets[0], ctx_ops[i].value_lo);
 
 if (ctx_ops[i].op == REGOP(READ_64)) {
 ctx_ops[i].value_hi =
-gk20a_mem_rd(g, current_mem, offsets[0] + 4);
+nvgpu_mem_rd(g, current_mem, offsets[0] + 4);
 
 gk20a_dbg(gpu_dbg_gpu_dbg,
 "context rd: offset=0x%x v=0x%x",
@@ -8507,9 +8507,9 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 if (ch_ctx->patch_ctx.mem.cpu_va)
 gr_gk20a_ctx_patch_write_end(g, ch_ctx);
 if (gr_ctx_ready)
-gk20a_mem_end(g, &ch_ctx->gr_ctx->mem);
+nvgpu_mem_end(g, &ch_ctx->gr_ctx->mem);
 if (pm_ctx_ready)
8512 gk20a_mem_end(g, &ch_ctx->pm_ctx.mem); 8512 nvgpu_mem_end(g, &ch_ctx->pm_ctx.mem);
8513 8513
8514 if (restart_gr_ctxsw) { 8514 if (restart_gr_ctxsw) {
8515 int tmp_err = gr_gk20a_enable_ctxsw(g); 8515 int tmp_err = gr_gk20a_enable_ctxsw(g);
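Note: the gr_gk20a.c hunks above all follow one access pattern with the renamed helpers: map the buffer with nvgpu_mem_begin(), do word-sized reads and writes, then release it with nvgpu_mem_end(). A minimal sketch of that pattern, for reference only — the helper below is hypothetical and not part of this patch; only the nvgpu_mem_* calls are the renamed API, and for vidmem-backed buffers nvgpu_mem_begin() presumably sets up a PRAMIN access path rather than a CPU mapping:

static int example_set_ctx_bit(struct gk20a *g, struct mem_desc *mem,
                               u32 byte_offset, u32 bit)
{
        u32 v;

        /* establish CPU (or PRAMIN) access to the buffer */
        if (nvgpu_mem_begin(g, mem))
                return -ENOMEM;

        v = nvgpu_mem_rd(g, mem, byte_offset);  /* byte offset, word aligned */
        v |= 1U << bit;
        nvgpu_mem_wr(g, mem, byte_offset, v);   /* write the word back */

        nvgpu_mem_end(g, mem);                  /* release the mapping */
        return 0;
}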
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index e78eb941..9c9fad1b 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -34,6 +34,7 @@
34#include <nvgpu/timers.h> 34#include <nvgpu/timers.h>
35#include <nvgpu/pramin.h> 35#include <nvgpu/pramin.h>
36#include <nvgpu/list.h> 36#include <nvgpu/list.h>
37#include <nvgpu/mem_desc.h>
37#include <nvgpu/allocator.h> 38#include <nvgpu/allocator.h>
38#include <nvgpu/semaphore.h> 39#include <nvgpu/semaphore.h>
39#include <nvgpu/page_allocator.h> 40#include <nvgpu/page_allocator.h>
@@ -139,7 +140,7 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
139 bool umapped_pte, int rw_flag, 140 bool umapped_pte, int rw_flag,
140 bool sparse, 141 bool sparse,
141 bool priv, 142 bool priv,
142 enum gk20a_aperture aperture); 143 enum nvgpu_aperture aperture);
143static int __must_check gk20a_init_system_vm(struct mm_gk20a *mm); 144static int __must_check gk20a_init_system_vm(struct mm_gk20a *mm);
144static int __must_check gk20a_init_bar1_vm(struct mm_gk20a *mm); 145static int __must_check gk20a_init_bar1_vm(struct mm_gk20a *mm);
145static int __must_check gk20a_init_hwpm(struct mm_gk20a *mm); 146static int __must_check gk20a_init_hwpm(struct mm_gk20a *mm);
@@ -945,7 +946,7 @@ int map_gmmu_pages(struct gk20a *g, struct gk20a_mm_entry *entry)
945 sg_phys(entry->mem.sgt->sgl), 946 sg_phys(entry->mem.sgt->sgl),
946 entry->mem.size); 947 entry->mem.size);
947 } else { 948 } else {
948 int err = gk20a_mem_begin(g, &entry->mem); 949 int err = nvgpu_mem_begin(g, &entry->mem);
949 950
950 if (err) 951 if (err)
951 return err; 952 return err;
@@ -971,7 +972,7 @@ void unmap_gmmu_pages(struct gk20a *g, struct gk20a_mm_entry *entry)
971 sg_phys(entry->mem.sgt->sgl), 972 sg_phys(entry->mem.sgt->sgl),
972 entry->mem.size); 973 entry->mem.size);
973 } else { 974 } else {
974 gk20a_mem_end(g, &entry->mem); 975 nvgpu_mem_end(g, &entry->mem);
975 } 976 }
976} 977}
977 978
@@ -1510,7 +1511,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
1510 bool sparse, 1511 bool sparse,
1511 bool priv, 1512 bool priv,
1512 struct vm_gk20a_mapping_batch *batch, 1513 struct vm_gk20a_mapping_batch *batch,
1513 enum gk20a_aperture aperture) 1514 enum nvgpu_aperture aperture)
1514{ 1515{
1515 int err = 0; 1516 int err = 0;
1516 bool allocated = false; 1517 bool allocated = false;
@@ -1543,7 +1544,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
1543 sgt ? lo32((u64)sg_phys(sgt->sgl)) : 0, 1544 sgt ? lo32((u64)sg_phys(sgt->sgl)) : 0,
1544 vm->gmmu_page_sizes[pgsz_idx] >> 10, vm_aspace_id(vm), 1545 vm->gmmu_page_sizes[pgsz_idx] >> 10, vm_aspace_id(vm),
1545 ctag_lines, ctag_offset, 1546 ctag_lines, ctag_offset,
1546 kind_v, flags, gk20a_aperture_str(aperture)); 1547 kind_v, flags, nvgpu_aperture_str(aperture));
1547 1548
1548 err = update_gmmu_ptes_locked(vm, pgsz_idx, 1549 err = update_gmmu_ptes_locked(vm, pgsz_idx,
1549 sgt, 1550 sgt,
@@ -1634,7 +1635,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
1634 } 1635 }
1635} 1636}
1636 1637
1637static enum gk20a_aperture gk20a_dmabuf_aperture(struct gk20a *g, 1638static enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g,
1638 struct dma_buf *dmabuf) 1639 struct dma_buf *dmabuf)
1639{ 1640{
1640 struct gk20a *buf_owner = gk20a_vidmem_buf_owner(dmabuf); 1641 struct gk20a *buf_owner = gk20a_vidmem_buf_owner(dmabuf);
@@ -1723,7 +1724,7 @@ static u64 gk20a_vm_map_duplicate_locked(struct vm_gk20a *vm,
1723 vm_aspace_id(vm), 1724 vm_aspace_id(vm),
1724 mapped_buffer->ctag_lines, mapped_buffer->ctag_offset, 1725 mapped_buffer->ctag_lines, mapped_buffer->ctag_offset,
1725 mapped_buffer->flags, 1726 mapped_buffer->flags,
1726 gk20a_aperture_str(gk20a_dmabuf_aperture(g, dmabuf))); 1727 nvgpu_aperture_str(gk20a_dmabuf_aperture(g, dmabuf)));
1727 1728
1728 if (sgt) 1729 if (sgt)
1729 *sgt = mapped_buffer->sgt; 1730 *sgt = mapped_buffer->sgt;
@@ -1941,11 +1942,11 @@ int gk20a_vidbuf_access_memory(struct gk20a *g, struct dma_buf *dmabuf,
1941 1942
1942 switch (cmd) { 1943 switch (cmd) {
1943 case NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_READ: 1944 case NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_READ:
1944 gk20a_mem_rd_n(g, mem, offset, buffer, size); 1945 nvgpu_mem_rd_n(g, mem, offset, buffer, size);
1945 break; 1946 break;
1946 1947
1947 case NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_WRITE: 1948 case NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_WRITE:
1948 gk20a_mem_wr_n(g, mem, offset, buffer, size); 1949 nvgpu_mem_wr_n(g, mem, offset, buffer, size);
1949 break; 1950 break;
1950 1951
1951 default: 1952 default:
@@ -1959,7 +1960,7 @@ int gk20a_vidbuf_access_memory(struct gk20a *g, struct dma_buf *dmabuf,
1959} 1960}
1960 1961
1961static u64 gk20a_mm_get_align(struct gk20a *g, struct scatterlist *sgl, 1962static u64 gk20a_mm_get_align(struct gk20a *g, struct scatterlist *sgl,
1962 enum gk20a_aperture aperture) 1963 enum nvgpu_aperture aperture)
1963{ 1964{
1964 u64 align = 0, chunk_align = 0; 1965 u64 align = 0, chunk_align = 0;
1965 u64 buf_addr; 1966 u64 buf_addr;
@@ -2030,7 +2031,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
2030 u32 ctag_map_win_ctagline = 0; 2031 u32 ctag_map_win_ctagline = 0;
2031 struct vm_reserved_va_node *va_node = NULL; 2032 struct vm_reserved_va_node *va_node = NULL;
2032 u32 ctag_offset; 2033 u32 ctag_offset;
2033 enum gk20a_aperture aperture; 2034 enum nvgpu_aperture aperture;
2034 2035
2035 if (user_mapped && vm->userspace_managed && 2036 if (user_mapped && vm->userspace_managed &&
2036 !(flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)) { 2037 !(flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)) {
@@ -2462,7 +2463,7 @@ static u64 __gk20a_gmmu_map(struct vm_gk20a *vm,
2462 u32 flags, 2463 u32 flags,
2463 int rw_flag, 2464 int rw_flag,
2464 bool priv, 2465 bool priv,
2465 enum gk20a_aperture aperture) 2466 enum nvgpu_aperture aperture)
2466{ 2467{
2467 struct gk20a *g = gk20a_from_vm(vm); 2468 struct gk20a *g = gk20a_from_vm(vm);
2468 u64 vaddr; 2469 u64 vaddr;
@@ -2496,7 +2497,7 @@ u64 gk20a_gmmu_map(struct vm_gk20a *vm,
2496 u32 flags, 2497 u32 flags,
2497 int rw_flag, 2498 int rw_flag,
2498 bool priv, 2499 bool priv,
2499 enum gk20a_aperture aperture) 2500 enum nvgpu_aperture aperture)
2500{ 2501{
2501 return __gk20a_gmmu_map(vm, sgt, 0, size, flags, rw_flag, priv, 2502 return __gk20a_gmmu_map(vm, sgt, 0, size, flags, rw_flag, priv,
2502 aperture); 2503 aperture);
@@ -2512,7 +2513,7 @@ u64 gk20a_gmmu_fixed_map(struct vm_gk20a *vm,
2512 u32 flags, 2513 u32 flags,
2513 int rw_flag, 2514 int rw_flag,
2514 bool priv, 2515 bool priv,
2515 enum gk20a_aperture aperture) 2516 enum nvgpu_aperture aperture)
2516{ 2517{
2517 return __gk20a_gmmu_map(vm, sgt, addr, size, flags, rw_flag, priv, 2518 return __gk20a_gmmu_map(vm, sgt, addr, size, flags, rw_flag, priv,
2518 aperture); 2519 aperture);
@@ -2851,7 +2852,7 @@ static void gk20a_gmmu_free_vid(struct gk20a *g, struct mem_desc *mem)
2851 schedule_work(&g->mm.vidmem.clear_mem_worker); 2852 schedule_work(&g->mm.vidmem.clear_mem_worker);
2852 } 2853 }
2853 } else { 2854 } else {
2854 gk20a_memset(g, mem, 0, 0, mem->size); 2855 nvgpu_memset(g, mem, 0, 0, mem->size);
2855 nvgpu_free(mem->allocator, 2856 nvgpu_free(mem->allocator,
2856 (u64)get_vidmem_page_alloc(mem->sgt->sgl)); 2857 (u64)get_vidmem_page_alloc(mem->sgt->sgl));
2857 gk20a_free_sgtable(g, &mem->sgt); 2858 gk20a_free_sgtable(g, &mem->sgt);
@@ -3170,7 +3171,7 @@ u64 gk20a_mm_iova_addr(struct gk20a *g, struct scatterlist *sgl,
3170void gk20a_pde_wr32(struct gk20a *g, struct gk20a_mm_entry *entry, 3171void gk20a_pde_wr32(struct gk20a *g, struct gk20a_mm_entry *entry,
3171 size_t w, size_t data) 3172 size_t w, size_t data)
3172{ 3173{
3173 gk20a_mem_wr32(g, &entry->mem, entry->woffset + w, data); 3174 nvgpu_mem_wr32(g, &entry->mem, entry->woffset + w, data);
3174} 3175}
3175 3176
3176u64 gk20a_pde_addr(struct gk20a *g, struct gk20a_mm_entry *entry) 3177u64 gk20a_pde_addr(struct gk20a *g, struct gk20a_mm_entry *entry)
@@ -3191,7 +3192,7 @@ static inline u32 big_valid_pde0_bits(struct gk20a *g,
3191{ 3192{
3192 u64 pte_addr = gk20a_pde_addr(g, entry); 3193 u64 pte_addr = gk20a_pde_addr(g, entry);
3193 u32 pde0_bits = 3194 u32 pde0_bits =
3194 gk20a_aperture_mask(g, &entry->mem, 3195 nvgpu_aperture_mask(g, &entry->mem,
3195 gmmu_pde_aperture_big_sys_mem_ncoh_f(), 3196 gmmu_pde_aperture_big_sys_mem_ncoh_f(),
3196 gmmu_pde_aperture_big_video_memory_f()) | 3197 gmmu_pde_aperture_big_video_memory_f()) |
3197 gmmu_pde_address_big_sys_f( 3198 gmmu_pde_address_big_sys_f(
@@ -3205,7 +3206,7 @@ static inline u32 small_valid_pde1_bits(struct gk20a *g,
3205{ 3206{
3206 u64 pte_addr = gk20a_pde_addr(g, entry); 3207 u64 pte_addr = gk20a_pde_addr(g, entry);
3207 u32 pde1_bits = 3208 u32 pde1_bits =
3208 gk20a_aperture_mask(g, &entry->mem, 3209 nvgpu_aperture_mask(g, &entry->mem,
3209 gmmu_pde_aperture_small_sys_mem_ncoh_f(), 3210 gmmu_pde_aperture_small_sys_mem_ncoh_f(),
3210 gmmu_pde_aperture_small_video_memory_f()) | 3211 gmmu_pde_aperture_small_video_memory_f()) |
3211 gmmu_pde_vol_small_true_f() | /* tbd: why? */ 3212 gmmu_pde_vol_small_true_f() | /* tbd: why? */
@@ -3230,7 +3231,7 @@ static int update_gmmu_pde_locked(struct vm_gk20a *vm,
3230 u32 kind_v, u64 *ctag, 3231 u32 kind_v, u64 *ctag,
3231 bool cacheable, bool unammped_pte, 3232 bool cacheable, bool unammped_pte,
3232 int rw_flag, bool sparse, bool priv, 3233 int rw_flag, bool sparse, bool priv,
3233 enum gk20a_aperture aperture) 3234 enum nvgpu_aperture aperture)
3234{ 3235{
3235 struct gk20a *g = gk20a_from_vm(vm); 3236 struct gk20a *g = gk20a_from_vm(vm);
3236 bool small_valid, big_valid; 3237 bool small_valid, big_valid;
@@ -3275,7 +3276,7 @@ static int update_gmmu_pte_locked(struct vm_gk20a *vm,
3275 u32 kind_v, u64 *ctag, 3276 u32 kind_v, u64 *ctag,
3276 bool cacheable, bool unmapped_pte, 3277 bool cacheable, bool unmapped_pte,
3277 int rw_flag, bool sparse, bool priv, 3278 int rw_flag, bool sparse, bool priv,
3278 enum gk20a_aperture aperture) 3279 enum nvgpu_aperture aperture)
3279{ 3280{
3280 struct gk20a *g = gk20a_from_vm(vm); 3281 struct gk20a *g = gk20a_from_vm(vm);
3281 int ctag_shift = ilog2(g->ops.fb.compression_page_size(g)); 3282 int ctag_shift = ilog2(g->ops.fb.compression_page_size(g));
@@ -3296,7 +3297,7 @@ static int update_gmmu_pte_locked(struct vm_gk20a *vm,
3296 if (priv) 3297 if (priv)
3297 pte_w[0] |= gmmu_pte_privilege_true_f(); 3298 pte_w[0] |= gmmu_pte_privilege_true_f();
3298 3299
3299 pte_w[1] = __gk20a_aperture_mask(g, aperture, 3300 pte_w[1] = __nvgpu_aperture_mask(g, aperture,
3300 gmmu_pte_aperture_sys_mem_ncoh_f(), 3301 gmmu_pte_aperture_sys_mem_ncoh_f(),
3301 gmmu_pte_aperture_video_memory_f()) | 3302 gmmu_pte_aperture_video_memory_f()) |
3302 gmmu_pte_kind_f(kind_v) | 3303 gmmu_pte_kind_f(kind_v) |
@@ -3379,7 +3380,7 @@ static int update_gmmu_level_locked(struct vm_gk20a *vm,
3379 bool sparse, 3380 bool sparse,
3380 int lvl, 3381 int lvl,
3381 bool priv, 3382 bool priv,
3382 enum gk20a_aperture aperture) 3383 enum nvgpu_aperture aperture)
3383{ 3384{
3384 struct gk20a *g = gk20a_from_vm(vm); 3385 struct gk20a *g = gk20a_from_vm(vm);
3385 const struct gk20a_mmu_level *l = &vm->mmu_levels[lvl]; 3386 const struct gk20a_mmu_level *l = &vm->mmu_levels[lvl];
@@ -3477,7 +3478,7 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
3477 int rw_flag, 3478 int rw_flag,
3478 bool sparse, 3479 bool sparse,
3479 bool priv, 3480 bool priv,
3480 enum gk20a_aperture aperture) 3481 enum nvgpu_aperture aperture)
3481{ 3482{
3482 struct gk20a *g = gk20a_from_vm(vm); 3483 struct gk20a *g = gk20a_from_vm(vm);
3483 int ctag_granularity = g->ops.fb.compression_page_size(g); 3484 int ctag_granularity = g->ops.fb.compression_page_size(g);
@@ -4735,14 +4736,14 @@ void gk20a_mm_init_pdb(struct gk20a *g, struct mem_desc *inst_block,
4735 4736
4736 gk20a_dbg_info("pde pa=0x%llx", pdb_addr); 4737 gk20a_dbg_info("pde pa=0x%llx", pdb_addr);
4737 4738
4738 gk20a_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(), 4739 nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(),
4739 gk20a_aperture_mask(g, &vm->pdb.mem, 4740 nvgpu_aperture_mask(g, &vm->pdb.mem,
4740 ram_in_page_dir_base_target_sys_mem_ncoh_f(), 4741 ram_in_page_dir_base_target_sys_mem_ncoh_f(),
4741 ram_in_page_dir_base_target_vid_mem_f()) | 4742 ram_in_page_dir_base_target_vid_mem_f()) |
4742 ram_in_page_dir_base_vol_true_f() | 4743 ram_in_page_dir_base_vol_true_f() |
4743 ram_in_page_dir_base_lo_f(pdb_addr_lo)); 4744 ram_in_page_dir_base_lo_f(pdb_addr_lo));
4744 4745
4745 gk20a_mem_wr32(g, inst_block, ram_in_page_dir_base_hi_w(), 4746 nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_hi_w(),
4746 ram_in_page_dir_base_hi_f(pdb_addr_hi)); 4747 ram_in_page_dir_base_hi_f(pdb_addr_hi));
4747} 4748}
4748 4749
@@ -4756,10 +4757,10 @@ void gk20a_init_inst_block(struct mem_desc *inst_block, struct vm_gk20a *vm,
4756 4757
4757 g->ops.mm.init_pdb(g, inst_block, vm); 4758 g->ops.mm.init_pdb(g, inst_block, vm);
4758 4759
4759 gk20a_mem_wr32(g, inst_block, ram_in_adr_limit_lo_w(), 4760 nvgpu_mem_wr32(g, inst_block, ram_in_adr_limit_lo_w(),
4760 u64_lo32(vm->va_limit - 1) & ~0xfff); 4761 u64_lo32(vm->va_limit - 1) & ~0xfff);
4761 4762
4762 gk20a_mem_wr32(g, inst_block, ram_in_adr_limit_hi_w(), 4763 nvgpu_mem_wr32(g, inst_block, ram_in_adr_limit_hi_w(),
4763 ram_in_adr_limit_hi_f(u64_hi32(vm->va_limit - 1))); 4764 ram_in_adr_limit_hi_f(u64_hi32(vm->va_limit - 1)));
4764 4765
4765 if (big_page_size && g->ops.mm.set_big_page_size) 4766 if (big_page_size && g->ops.mm.set_big_page_size)
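The gk20a_mm_init_pdb() hunk directly above shows the typical nvgpu_aperture_mask() usage: pick the sysmem or vidmem variant of a hardware field based on where the backing memory actually lives, then OR it into a word written with nvgpu_mem_wr32(). A sketch of that pattern, assuming the same generated register helpers as in the hunk (the wrapper function itself is illustrative, not from this patch):

static void example_program_pdb_lo(struct gk20a *g,
                                   struct mem_desc *inst_block,
                                   struct mem_desc *pdb_mem, u32 pdb_addr_lo)
{
        u32 v = nvgpu_aperture_mask(g, pdb_mem,
                        ram_in_page_dir_base_target_sys_mem_ncoh_f(),
                        ram_in_page_dir_base_target_vid_mem_f()) |
                ram_in_page_dir_base_vol_true_f() |
                ram_in_page_dir_base_lo_f(pdb_addr_lo);

        /* ram_in_page_dir_base_lo_w() is a word index into the inst block */
        nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(), v);
}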
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index da8bbb0a..3c701907 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -220,7 +220,7 @@ struct gk20a_mmu_level {
220 u32 kind_v, u64 *ctag, 220 u32 kind_v, u64 *ctag,
221 bool cacheable, bool unmapped_pte, 221 bool cacheable, bool unmapped_pte,
222 int rw_flag, bool sparse, bool priv, 222 int rw_flag, bool sparse, bool priv,
223 enum gk20a_aperture aperture); 223 enum nvgpu_aperture aperture);
224 size_t entry_size; 224 size_t entry_size;
225}; 225};
226 226
@@ -514,7 +514,7 @@ u64 gk20a_gmmu_map(struct vm_gk20a *vm,
514 u32 flags, 514 u32 flags,
515 int rw_flag, 515 int rw_flag,
516 bool priv, 516 bool priv,
517 enum gk20a_aperture aperture); 517 enum nvgpu_aperture aperture);
518u64 gk20a_gmmu_fixed_map(struct vm_gk20a *vm, 518u64 gk20a_gmmu_fixed_map(struct vm_gk20a *vm,
519 struct sg_table **sgt, 519 struct sg_table **sgt,
520 u64 addr, 520 u64 addr,
@@ -522,7 +522,7 @@ u64 gk20a_gmmu_fixed_map(struct vm_gk20a *vm,
522 u32 flags, 522 u32 flags,
523 int rw_flag, 523 int rw_flag,
524 bool priv, 524 bool priv,
525 enum gk20a_aperture aperture); 525 enum nvgpu_aperture aperture);
526 526
527/* Flags for the below gk20a_gmmu_{alloc,alloc_map}_flags* */ 527/* Flags for the below gk20a_gmmu_{alloc,alloc_map}_flags* */
528 528
@@ -589,9 +589,9 @@ static inline phys_addr_t gk20a_mem_phys(struct mem_desc *mem)
589 return 0; 589 return 0;
590} 590}
591 591
592u32 __gk20a_aperture_mask(struct gk20a *g, enum gk20a_aperture aperture, 592u32 __nvgpu_aperture_mask(struct gk20a *g, enum nvgpu_aperture aperture,
593 u32 sysmem_mask, u32 vidmem_mask); 593 u32 sysmem_mask, u32 vidmem_mask);
594u32 gk20a_aperture_mask(struct gk20a *g, struct mem_desc *mem, 594u32 nvgpu_aperture_mask(struct gk20a *g, struct mem_desc *mem,
595 u32 sysmem_mask, u32 vidmem_mask); 595 u32 sysmem_mask, u32 vidmem_mask);
596 596
597void gk20a_pde_wr32(struct gk20a *g, struct gk20a_mm_entry *entry, 597void gk20a_pde_wr32(struct gk20a *g, struct gk20a_mm_entry *entry,
@@ -612,7 +612,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
612 bool sparse, 612 bool sparse,
613 bool priv, 613 bool priv,
614 struct vm_gk20a_mapping_batch *batch, 614 struct vm_gk20a_mapping_batch *batch,
615 enum gk20a_aperture aperture); 615 enum nvgpu_aperture aperture);
616 616
617void gk20a_gmmu_unmap(struct vm_gk20a *vm, 617void gk20a_gmmu_unmap(struct vm_gk20a *vm,
618 u64 vaddr, 618 u64 vaddr,
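The declarations above introduce the two aperture-mask helpers: __nvgpu_aperture_mask() takes an explicit enum nvgpu_aperture, while nvgpu_aperture_mask() takes a struct mem_desc and presumably derives the aperture from it. The patch only shows the prototypes, so the following is a plausible sketch of their shape, not the actual implementation (which may, for instance, also consult g to special-case unified-memory boards):

u32 __nvgpu_aperture_mask(struct gk20a *g, enum nvgpu_aperture aperture,
                          u32 sysmem_mask, u32 vidmem_mask)
{
        switch (aperture) {
        case APERTURE_SYSMEM:
                return sysmem_mask;
        case APERTURE_VIDMEM:
                return vidmem_mask;
        case APERTURE_INVALID:
        default:
                WARN_ON(1);     /* unallocated buffers have no aperture */
                return 0;
        }
}

u32 nvgpu_aperture_mask(struct gk20a *g, struct mem_desc *mem,
                        u32 sysmem_mask, u32 vidmem_mask)
{
        return __nvgpu_aperture_mask(g, mem->aperture,
                        sysmem_mask, vidmem_mask);
}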
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 3297d376..e70e50c2 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -307,7 +307,7 @@ static void printtrace(struct pmu_gk20a *pmu)
307 return; 307 return;
308 308
309 /* read pmu traces into system memory buffer */ 309 /* read pmu traces into system memory buffer */
310 gk20a_mem_rd_n(g, &pmu->trace_buf, 310 nvgpu_mem_rd_n(g, &pmu->trace_buf,
311 0, tracebuffer, GK20A_PMU_TRACE_BUFSIZE); 311 0, tracebuffer, GK20A_PMU_TRACE_BUFSIZE);
312 312
313 trace = (char *)tracebuffer; 313 trace = (char *)tracebuffer;
@@ -3155,7 +3155,7 @@ static int gk20a_prepare_ucode(struct gk20a *g)
3155 if (err) 3155 if (err)
3156 goto err_release_fw; 3156 goto err_release_fw;
3157 3157
3158 gk20a_mem_wr_n(g, &pmu->ucode, 0, pmu->ucode_image, 3158 nvgpu_mem_wr_n(g, &pmu->ucode, 0, pmu->ucode_image,
3159 pmu->desc->app_start_offset + pmu->desc->app_size); 3159 pmu->desc->app_start_offset + pmu->desc->app_size);
3160 3160
3161 return gk20a_init_pmu(pmu); 3161 return gk20a_init_pmu(pmu);
@@ -4872,7 +4872,7 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
4872 (struct flcn_mem_desc_v0 *) 4872 (struct flcn_mem_desc_v0 *)
4873 pv->pmu_allocation_get_fb_addr(pmu, in)); 4873 pv->pmu_allocation_get_fb_addr(pmu, in));
4874 4874
4875 gk20a_mem_wr_n(g, seq->in_mem, 0, 4875 nvgpu_mem_wr_n(g, seq->in_mem, 0,
4876 payload->in.buf, payload->in.fb_size); 4876 payload->in.buf, payload->in.fb_size);
4877 4877
4878 } else { 4878 } else {
@@ -5736,7 +5736,7 @@ static int falc_trace_show(struct seq_file *s, void *data)
5736 return -ENOMEM; 5736 return -ENOMEM;
5737 5737
5738 /* read pmu traces into system memory buffer */ 5738 /* read pmu traces into system memory buffer */
5739 gk20a_mem_rd_n(g, &pmu->trace_buf, 5739 nvgpu_mem_rd_n(g, &pmu->trace_buf,
5740 0, tracebuffer, GK20A_PMU_TRACE_BUFSIZE); 5740 0, tracebuffer, GK20A_PMU_TRACE_BUFSIZE);
5741 5741
5742 trace = (char *)tracebuffer; 5742 trace = (char *)tracebuffer;
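Both pmu_gk20a.c call sites above use the bulk accessors: nvgpu_mem_rd_n()/nvgpu_mem_wr_n() copy a given number of bytes between a kernel buffer and a mem_desc, starting at a byte offset. For reference, a minimal sketch of the trace-buffer read — the wrapper and its error handling are illustrative; pmu->trace_buf and GK20A_PMU_TRACE_BUFSIZE are as used in the hunks above:

static char *example_snapshot_pmu_trace(struct gk20a *g, struct pmu_gk20a *pmu)
{
        char *buf = kzalloc(GK20A_PMU_TRACE_BUFSIZE, GFP_KERNEL);

        if (!buf)
                return NULL;

        /* copy the whole trace buffer, starting at byte offset 0 */
        nvgpu_mem_rd_n(g, &pmu->trace_buf, 0, buf, GK20A_PMU_TRACE_BUFSIZE);
        return buf;
}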
diff --git a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
index bed2e9b5..7e6005a2 100644
--- a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
@@ -32,7 +32,7 @@ static u32 gk20a_pramin_enter(struct gk20a *g, struct mem_desc *mem,
32 u32 hi = (u32)((addr & ~(u64)0xfffff) 32 u32 hi = (u32)((addr & ~(u64)0xfffff)
33 >> bus_bar0_window_target_bar0_window_base_shift_v()); 33 >> bus_bar0_window_target_bar0_window_base_shift_v());
34 u32 lo = (u32)(addr & 0xfffff); 34 u32 lo = (u32)(addr & 0xfffff);
35 u32 win = gk20a_aperture_mask(g, mem, 35 u32 win = nvgpu_aperture_mask(g, mem,
36 bus_bar0_window_target_sys_mem_noncoherent_f(), 36 bus_bar0_window_target_sys_mem_noncoherent_f(),
37 bus_bar0_window_target_vid_mem_f()) | 37 bus_bar0_window_target_vid_mem_f()) |
38 bus_bar0_window_base_f(hi); 38 bus_bar0_window_base_f(hi);
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index 360cfc33..da3adb72 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -723,7 +723,7 @@ static void lsfm_init_wpr_contents(struct gk20a *g, struct ls_flcn_mgr *plsfm,
723 */ 723 */
724 while (pnode) { 724 while (pnode) {
725 /* Flush WPR header to memory*/ 725 /* Flush WPR header to memory*/
726 gk20a_mem_wr_n(g, ucode, i * sizeof(pnode->wpr_header), 726 nvgpu_mem_wr_n(g, ucode, i * sizeof(pnode->wpr_header),
727 &pnode->wpr_header, sizeof(pnode->wpr_header)); 727 &pnode->wpr_header, sizeof(pnode->wpr_header));
728 728
729 gm20b_dbg_pmu("wpr header"); 729 gm20b_dbg_pmu("wpr header");
@@ -739,7 +739,7 @@ static void lsfm_init_wpr_contents(struct gk20a *g, struct ls_flcn_mgr *plsfm,
739 pnode->wpr_header.status); 739 pnode->wpr_header.status);
740 740
741 /*Flush LSB header to memory*/ 741 /*Flush LSB header to memory*/
742 gk20a_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset, 742 nvgpu_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset,
743 &pnode->lsb_header, sizeof(pnode->lsb_header)); 743 &pnode->lsb_header, sizeof(pnode->lsb_header));
744 744
745 gm20b_dbg_pmu("lsb header"); 745 gm20b_dbg_pmu("lsb header");
@@ -773,13 +773,13 @@ static void lsfm_init_wpr_contents(struct gk20a *g, struct ls_flcn_mgr *plsfm,
773 if (!pnode->ucode_img.header) { 773 if (!pnode->ucode_img.header) {
774 /*Populate gen bl and flush to memory*/ 774 /*Populate gen bl and flush to memory*/
775 lsfm_fill_flcn_bl_gen_desc(g, pnode); 775 lsfm_fill_flcn_bl_gen_desc(g, pnode);
776 gk20a_mem_wr_n(g, ucode, 776 nvgpu_mem_wr_n(g, ucode,
777 pnode->lsb_header.bl_data_off, 777 pnode->lsb_header.bl_data_off,
778 &pnode->bl_gen_desc, 778 &pnode->bl_gen_desc,
779 pnode->bl_gen_desc_size); 779 pnode->bl_gen_desc_size);
780 } 780 }
781 /*Copying of ucode*/ 781 /*Copying of ucode*/
782 gk20a_mem_wr_n(g, ucode, pnode->lsb_header.ucode_off, 782 nvgpu_mem_wr_n(g, ucode, pnode->lsb_header.ucode_off,
783 pnode->ucode_img.data, 783 pnode->ucode_img.data,
784 pnode->ucode_img.data_size); 784 pnode->ucode_img.data_size);
785 pnode = pnode->next; 785 pnode = pnode->next;
@@ -787,7 +787,7 @@ static void lsfm_init_wpr_contents(struct gk20a *g, struct ls_flcn_mgr *plsfm,
787 } 787 }
788 788
789 /* Tag the terminator WPR header with an invalid falcon ID. */ 789 /* Tag the terminator WPR header with an invalid falcon ID. */
790 gk20a_mem_wr32(g, ucode, 790 nvgpu_mem_wr32(g, ucode,
791 plsfm->managed_flcn_cnt * sizeof(struct lsf_wpr_header) + 791 plsfm->managed_flcn_cnt * sizeof(struct lsf_wpr_header) +
792 offsetof(struct lsf_wpr_header, falcon_id), 792 offsetof(struct lsf_wpr_header, falcon_id),
793 LSF_FALCON_ID_INVALID); 793 LSF_FALCON_ID_INVALID);
@@ -1133,7 +1133,7 @@ static int gm20b_bootstrap_hs_flcn(struct gk20a *g)
1133 ((struct flcn_acr_desc *)acr_dmem)->regions.no_regions = 2; 1133 ((struct flcn_acr_desc *)acr_dmem)->regions.no_regions = 2;
1134 ((struct flcn_acr_desc *)acr_dmem)->wpr_offset = 0; 1134 ((struct flcn_acr_desc *)acr_dmem)->wpr_offset = 0;
1135 1135
1136 gk20a_mem_wr_n(g, &acr->acr_ucode, 0, 1136 nvgpu_mem_wr_n(g, &acr->acr_ucode, 0,
1137 acr_ucode_data_t210_load, img_size_in_bytes); 1137 acr_ucode_data_t210_load, img_size_in_bytes);
1138 /* 1138 /*
1139 * In order to execute this binary, we will be using 1139 * In order to execute this binary, we will be using
@@ -1433,7 +1433,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
1433 goto err_free_ucode; 1433 goto err_free_ucode;
1434 } 1434 }
1435 1435
1436 gk20a_mem_wr_n(g, &acr->hsbl_ucode, 0, pmu_bl_gm10x, bl_sz); 1436 nvgpu_mem_wr_n(g, &acr->hsbl_ucode, 0, pmu_bl_gm10x, bl_sz);
1437 gm20b_dbg_pmu("Copied bl ucode to bl_cpuva\n"); 1437 gm20b_dbg_pmu("Copied bl ucode to bl_cpuva\n");
1438 } 1438 }
1439 /* 1439 /*
diff --git a/drivers/gpu/nvgpu/gm20b/bus_gm20b.c b/drivers/gpu/nvgpu/gm20b/bus_gm20b.c
index 68a4b15f..ba04945b 100644
--- a/drivers/gpu/nvgpu/gm20b/bus_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/bus_gm20b.c
@@ -33,7 +33,7 @@ static int gm20b_bus_bar1_bind(struct gk20a *g, struct mem_desc *bar1_inst)
33 gk20a_dbg_info("bar1 inst block ptr: 0x%08x", ptr_v); 33 gk20a_dbg_info("bar1 inst block ptr: 0x%08x", ptr_v);
34 34
35 gk20a_writel(g, bus_bar1_block_r(), 35 gk20a_writel(g, bus_bar1_block_r(),
36 gk20a_aperture_mask(g, bar1_inst, 36 nvgpu_aperture_mask(g, bar1_inst,
37 bus_bar1_block_target_sys_mem_ncoh_f(), 37 bus_bar1_block_target_sys_mem_ncoh_f(),
38 bus_bar1_block_target_vid_mem_f()) | 38 bus_bar1_block_target_vid_mem_f()) |
39 bus_bar1_block_mode_virtual_f() | 39 bus_bar1_block_mode_virtual_f() |
diff --git a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
index edf962de..6c34689b 100644
--- a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
@@ -41,7 +41,7 @@ static void channel_gm20b_bind(struct channel_gk20a *c)
41 41
42 gk20a_writel(g, ccsr_channel_inst_r(c->hw_chid), 42 gk20a_writel(g, ccsr_channel_inst_r(c->hw_chid),
43 ccsr_channel_inst_ptr_f(inst_ptr) | 43 ccsr_channel_inst_ptr_f(inst_ptr) |
44 gk20a_aperture_mask(g, &c->inst_block, 44 nvgpu_aperture_mask(g, &c->inst_block,
45 ccsr_channel_inst_target_sys_mem_ncoh_f(), 45 ccsr_channel_inst_target_sys_mem_ncoh_f(),
46 ccsr_channel_inst_target_vid_mem_f()) | 46 ccsr_channel_inst_target_vid_mem_f()) |
47 ccsr_channel_inst_bind_true_f()); 47 ccsr_channel_inst_bind_true_f());
diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
index a5dbe23d..57bff64f 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
@@ -862,7 +862,7 @@ static void gr_gm20b_update_ctxsw_preemption_mode(struct gk20a *g,
862 862
863 if (gr_ctx->compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CTA) { 863 if (gr_ctx->compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CTA) {
864 gk20a_dbg_info("CTA: %x", cta_preempt_option); 864 gk20a_dbg_info("CTA: %x", cta_preempt_option);
865 gk20a_mem_wr(g, mem, 865 nvgpu_mem_wr(g, mem,
866 ctxsw_prog_main_image_preemption_options_o(), 866 ctxsw_prog_main_image_preemption_options_o(),
867 cta_preempt_option); 867 cta_preempt_option);
868 } 868 }
@@ -1022,15 +1022,15 @@ static int gr_gm20b_update_pc_sampling(struct channel_gk20a *c,
1022 1022
1023 mem = &ch_ctx->gr_ctx->mem; 1023 mem = &ch_ctx->gr_ctx->mem;
1024 1024
1025 if (gk20a_mem_begin(c->g, mem)) 1025 if (nvgpu_mem_begin(c->g, mem))
1026 return -ENOMEM; 1026 return -ENOMEM;
1027 1027
1028 v = gk20a_mem_rd(c->g, mem, ctxsw_prog_main_image_pm_o()); 1028 v = nvgpu_mem_rd(c->g, mem, ctxsw_prog_main_image_pm_o());
1029 v &= ~ctxsw_prog_main_image_pm_pc_sampling_m(); 1029 v &= ~ctxsw_prog_main_image_pm_pc_sampling_m();
1030 v |= ctxsw_prog_main_image_pm_pc_sampling_f(enable); 1030 v |= ctxsw_prog_main_image_pm_pc_sampling_f(enable);
1031 gk20a_mem_wr(c->g, mem, ctxsw_prog_main_image_pm_o(), v); 1031 nvgpu_mem_wr(c->g, mem, ctxsw_prog_main_image_pm_o(), v);
1032 1032
1033 gk20a_mem_end(c->g, mem); 1033 nvgpu_mem_end(c->g, mem);
1034 1034
1035 gk20a_dbg_fn("done"); 1035 gk20a_dbg_fn("done");
1036 1036
@@ -1112,9 +1112,9 @@ static void gr_gm20b_enable_cde_in_fecs(struct gk20a *g, struct mem_desc *mem)
1112{ 1112{
1113 u32 cde_v; 1113 u32 cde_v;
1114 1114
1115 cde_v = gk20a_mem_rd(g, mem, ctxsw_prog_main_image_ctl_o()); 1115 cde_v = nvgpu_mem_rd(g, mem, ctxsw_prog_main_image_ctl_o());
1116 cde_v |= ctxsw_prog_main_image_ctl_cde_enabled_f(); 1116 cde_v |= ctxsw_prog_main_image_ctl_cde_enabled_f();
1117 gk20a_mem_wr(g, mem, ctxsw_prog_main_image_ctl_o(), cde_v); 1117 nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_ctl_o(), cde_v);
1118} 1118}
1119 1119
1120static void gr_gm20b_bpt_reg_info(struct gk20a *g, struct warpstate *w_state) 1120static void gr_gm20b_bpt_reg_info(struct gk20a *g, struct warpstate *w_state)
diff --git a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
index 949a5c5d..08d446e7 100644
--- a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
@@ -28,7 +28,7 @@ static void gm20b_mm_set_big_page_size(struct gk20a *g,
28 gk20a_dbg_fn(""); 28 gk20a_dbg_fn("");
29 29
30 gk20a_dbg_info("big page size %d\n", size); 30 gk20a_dbg_info("big page size %d\n", size);
31 val = gk20a_mem_rd32(g, mem, ram_in_big_page_size_w()); 31 val = nvgpu_mem_rd32(g, mem, ram_in_big_page_size_w());
32 val &= ~ram_in_big_page_size_m(); 32 val &= ~ram_in_big_page_size_m();
33 33
34 if (size == SZ_64K) 34 if (size == SZ_64K)
@@ -36,7 +36,7 @@ static void gm20b_mm_set_big_page_size(struct gk20a *g,
36 else 36 else
37 val |= ram_in_big_page_size_128kb_f(); 37 val |= ram_in_big_page_size_128kb_f();
38 38
39 gk20a_mem_wr32(g, mem, ram_in_big_page_size_w(), val); 39 nvgpu_mem_wr32(g, mem, ram_in_big_page_size_w(), val);
40 gk20a_dbg_fn("done"); 40 gk20a_dbg_fn("done");
41} 41}
42 42
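gm20b_mm_set_big_page_size() above is the word-indexed counterpart of the byte-offset read-modify-write shown earlier: nvgpu_mem_rd32()/nvgpu_mem_wr32() take a word index (here ram_in_big_page_size_w()) rather than a byte offset. A sketch of that variant, with an illustrative wrapper and the field helpers from the hunk:

static void example_set_big_page_128k(struct gk20a *g, struct mem_desc *mem)
{
        /* word-indexed read of the big-page-size field in the inst block */
        u32 val = nvgpu_mem_rd32(g, mem, ram_in_big_page_size_w());

        val &= ~ram_in_big_page_size_m();
        val |= ram_in_big_page_size_128kb_f();

        nvgpu_mem_wr32(g, mem, ram_in_big_page_size_w(), val);
}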
diff --git a/drivers/gpu/nvgpu/gp106/acr_gp106.c b/drivers/gpu/nvgpu/gp106/acr_gp106.c
index c7638d3e..ee709ed1 100644
--- a/drivers/gpu/nvgpu/gp106/acr_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/acr_gp106.c
@@ -701,7 +701,7 @@ static void lsfm_init_wpr_contents(struct gk20a *g,
701 */ 701 */
702 while (pnode) { 702 while (pnode) {
703 /* Flush WPR header to memory*/ 703 /* Flush WPR header to memory*/
704 gk20a_mem_wr_n(g, ucode, i * sizeof(pnode->wpr_header), 704 nvgpu_mem_wr_n(g, ucode, i * sizeof(pnode->wpr_header),
705 &pnode->wpr_header, sizeof(pnode->wpr_header)); 705 &pnode->wpr_header, sizeof(pnode->wpr_header));
706 706
707 gp106_dbg_pmu("wpr header"); 707 gp106_dbg_pmu("wpr header");
@@ -717,7 +717,7 @@ static void lsfm_init_wpr_contents(struct gk20a *g,
717 pnode->wpr_header.status); 717 pnode->wpr_header.status);
718 718
719 /*Flush LSB header to memory*/ 719 /*Flush LSB header to memory*/
720 gk20a_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset, 720 nvgpu_mem_wr_n(g, ucode, pnode->wpr_header.lsb_offset,
721 &pnode->lsb_header, sizeof(pnode->lsb_header)); 721 &pnode->lsb_header, sizeof(pnode->lsb_header));
722 722
723 gp106_dbg_pmu("lsb header"); 723 gp106_dbg_pmu("lsb header");
@@ -751,13 +751,13 @@ static void lsfm_init_wpr_contents(struct gk20a *g,
751 if (!pnode->ucode_img.header) { 751 if (!pnode->ucode_img.header) {
752 /*Populate gen bl and flush to memory*/ 752 /*Populate gen bl and flush to memory*/
753 lsfm_fill_flcn_bl_gen_desc(g, pnode); 753 lsfm_fill_flcn_bl_gen_desc(g, pnode);
754 gk20a_mem_wr_n(g, ucode, 754 nvgpu_mem_wr_n(g, ucode,
755 pnode->lsb_header.bl_data_off, 755 pnode->lsb_header.bl_data_off,
756 &pnode->bl_gen_desc, 756 &pnode->bl_gen_desc,
757 pnode->bl_gen_desc_size); 757 pnode->bl_gen_desc_size);
758 } 758 }
759 /*Copying of ucode*/ 759 /*Copying of ucode*/
760 gk20a_mem_wr_n(g, ucode, pnode->lsb_header.ucode_off, 760 nvgpu_mem_wr_n(g, ucode, pnode->lsb_header.ucode_off,
761 pnode->ucode_img.data, 761 pnode->ucode_img.data,
762 pnode->ucode_img.data_size); 762 pnode->ucode_img.data_size);
763 pnode = pnode->next; 763 pnode = pnode->next;
@@ -765,7 +765,7 @@ static void lsfm_init_wpr_contents(struct gk20a *g,
765 } 765 }
766 766
767 /* Tag the terminator WPR header with an invalid falcon ID. */ 767 /* Tag the terminator WPR header with an invalid falcon ID. */
768 gk20a_mem_wr32(g, ucode, 768 nvgpu_mem_wr32(g, ucode,
769 plsfm->managed_flcn_cnt * sizeof(struct lsf_wpr_header) + 769 plsfm->managed_flcn_cnt * sizeof(struct lsf_wpr_header) +
770 offsetof(struct lsf_wpr_header, falcon_id), 770 offsetof(struct lsf_wpr_header, falcon_id),
771 LSF_FALCON_ID_INVALID); 771 LSF_FALCON_ID_INVALID);
@@ -1124,7 +1124,7 @@ static int gp106_bootstrap_hs_flcn(struct gk20a *g)
1124 ((struct flcn_acr_desc_v1 *)acr_dmem)->regions.region_props[ 1124 ((struct flcn_acr_desc_v1 *)acr_dmem)->regions.region_props[
1125 0].shadowmMem_startaddress = wpr_inf.nonwpr_base >> 8; 1125 0].shadowmMem_startaddress = wpr_inf.nonwpr_base >> 8;
1126 1126
1127 gk20a_mem_wr_n(g, &acr->acr_ucode, 0, 1127 nvgpu_mem_wr_n(g, &acr->acr_ucode, 0,
1128 acr_ucode_data_t210_load, img_size_in_bytes); 1128 acr_ucode_data_t210_load, img_size_in_bytes);
1129 1129
1130 /* 1130 /*
diff --git a/drivers/gpu/nvgpu/gp106/sec2_gp106.c b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
index dd67f882..5a331480 100644
--- a/drivers/gpu/nvgpu/gp106/sec2_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
@@ -170,7 +170,7 @@ int bl_bootstrap_sec2(struct pmu_gk20a *pmu,
170 pwr_pmu_new_instblk_ptr_f( 170 pwr_pmu_new_instblk_ptr_f(
171 gk20a_mm_inst_block_addr(g, &mm->pmu.inst_block) >> 12) | 171 gk20a_mm_inst_block_addr(g, &mm->pmu.inst_block) >> 12) |
172 pwr_pmu_new_instblk_valid_f(1) | 172 pwr_pmu_new_instblk_valid_f(1) |
173 gk20a_aperture_mask(g, &mm->pmu.inst_block, 173 nvgpu_aperture_mask(g, &mm->pmu.inst_block,
174 pwr_pmu_new_instblk_target_sys_coh_f(), 174 pwr_pmu_new_instblk_target_sys_coh_f(),
175 pwr_pmu_new_instblk_target_fb_f())); 175 pwr_pmu_new_instblk_target_fb_f()));
176 176
@@ -315,7 +315,7 @@ void init_pmu_setup_hw1(struct gk20a *g)
315 pwr_pmu_new_instblk_ptr_f( 315 pwr_pmu_new_instblk_ptr_f(
316 gk20a_mm_inst_block_addr(g, &mm->pmu.inst_block) >> 12) | 316 gk20a_mm_inst_block_addr(g, &mm->pmu.inst_block) >> 12) |
317 pwr_pmu_new_instblk_valid_f(1) | 317 pwr_pmu_new_instblk_valid_f(1) |
318 gk20a_aperture_mask(g, &mm->pmu.inst_block, 318 nvgpu_aperture_mask(g, &mm->pmu.inst_block,
319 pwr_pmu_new_instblk_target_sys_coh_f(), 319 pwr_pmu_new_instblk_target_sys_coh_f(),
320 pwr_pmu_new_instblk_target_fb_f())); 320 pwr_pmu_new_instblk_target_fb_f()));
321 321
diff --git a/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c b/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
index 6f1a0298..3787662b 100644
--- a/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
@@ -33,18 +33,18 @@ static void gp10b_set_pdb_fault_replay_flags(struct gk20a *g,
33 33
34 gk20a_dbg_fn(""); 34 gk20a_dbg_fn("");
35 35
36 val = gk20a_mem_rd32(g, mem, 36 val = nvgpu_mem_rd32(g, mem,
37 ram_in_page_dir_base_fault_replay_tex_w()); 37 ram_in_page_dir_base_fault_replay_tex_w());
38 val &= ~ram_in_page_dir_base_fault_replay_tex_m(); 38 val &= ~ram_in_page_dir_base_fault_replay_tex_m();
39 val |= ram_in_page_dir_base_fault_replay_tex_true_f(); 39 val |= ram_in_page_dir_base_fault_replay_tex_true_f();
40 gk20a_mem_wr32(g, mem, 40 nvgpu_mem_wr32(g, mem,
41 ram_in_page_dir_base_fault_replay_tex_w(), val); 41 ram_in_page_dir_base_fault_replay_tex_w(), val);
42 42
43 val = gk20a_mem_rd32(g, mem, 43 val = nvgpu_mem_rd32(g, mem,
44 ram_in_page_dir_base_fault_replay_gcc_w()); 44 ram_in_page_dir_base_fault_replay_gcc_w());
45 val &= ~ram_in_page_dir_base_fault_replay_gcc_m(); 45 val &= ~ram_in_page_dir_base_fault_replay_gcc_m();
46 val |= ram_in_page_dir_base_fault_replay_gcc_true_f(); 46 val |= ram_in_page_dir_base_fault_replay_gcc_true_f();
47 gk20a_mem_wr32(g, mem, 47 nvgpu_mem_wr32(g, mem,
48 ram_in_page_dir_base_fault_replay_gcc_w(), val); 48 ram_in_page_dir_base_fault_replay_gcc_w(), val);
49 49
50 gk20a_dbg_fn("done"); 50 gk20a_dbg_fn("done");
@@ -64,14 +64,14 @@ int channel_gp10b_commit_userd(struct channel_gk20a *c)
64 gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx", 64 gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx",
65 c->hw_chid, (u64)c->userd_iova); 65 c->hw_chid, (u64)c->userd_iova);
66 66
67 gk20a_mem_wr32(g, &c->inst_block, 67 nvgpu_mem_wr32(g, &c->inst_block,
68 ram_in_ramfc_w() + ram_fc_userd_w(), 68 ram_in_ramfc_w() + ram_fc_userd_w(),
69 (g->mm.vidmem_is_vidmem ? 69 (g->mm.vidmem_is_vidmem ?
70 pbdma_userd_target_sys_mem_ncoh_f() : 70 pbdma_userd_target_sys_mem_ncoh_f() :
71 pbdma_userd_target_vid_mem_f()) | 71 pbdma_userd_target_vid_mem_f()) |
72 pbdma_userd_addr_f(addr_lo)); 72 pbdma_userd_addr_f(addr_lo));
73 73
74 gk20a_mem_wr32(g, &c->inst_block, 74 nvgpu_mem_wr32(g, &c->inst_block,
75 ram_in_ramfc_w() + ram_fc_userd_hi_w(), 75 ram_in_ramfc_w() + ram_fc_userd_hi_w(),
76 pbdma_userd_hi_addr_f(addr_hi)); 76 pbdma_userd_hi_addr_f(addr_hi));
77 77
@@ -87,25 +87,25 @@ static int channel_gp10b_setup_ramfc(struct channel_gk20a *c,
87 87
88 gk20a_dbg_fn(""); 88 gk20a_dbg_fn("");
89 89
90 gk20a_memset(g, mem, 0, 0, ram_fc_size_val_v()); 90 nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v());
91 91
92 gk20a_mem_wr32(g, mem, ram_fc_gp_base_w(), 92 nvgpu_mem_wr32(g, mem, ram_fc_gp_base_w(),
93 pbdma_gp_base_offset_f( 93 pbdma_gp_base_offset_f(
94 u64_lo32(gpfifo_base >> pbdma_gp_base_rsvd_s()))); 94 u64_lo32(gpfifo_base >> pbdma_gp_base_rsvd_s())));
95 95
96 gk20a_mem_wr32(g, mem, ram_fc_gp_base_hi_w(), 96 nvgpu_mem_wr32(g, mem, ram_fc_gp_base_hi_w(),
97 pbdma_gp_base_hi_offset_f(u64_hi32(gpfifo_base)) | 97 pbdma_gp_base_hi_offset_f(u64_hi32(gpfifo_base)) |
98 pbdma_gp_base_hi_limit2_f(ilog2(gpfifo_entries))); 98 pbdma_gp_base_hi_limit2_f(ilog2(gpfifo_entries)));
99 99
100 gk20a_mem_wr32(g, mem, ram_fc_signature_w(), 100 nvgpu_mem_wr32(g, mem, ram_fc_signature_w(),
101 c->g->ops.fifo.get_pbdma_signature(c->g)); 101 c->g->ops.fifo.get_pbdma_signature(c->g));
102 102
103 gk20a_mem_wr32(g, mem, ram_fc_formats_w(), 103 nvgpu_mem_wr32(g, mem, ram_fc_formats_w(),
104 pbdma_formats_gp_fermi0_f() | 104 pbdma_formats_gp_fermi0_f() |
105 pbdma_formats_pb_fermi1_f() | 105 pbdma_formats_pb_fermi1_f() |
106 pbdma_formats_mp_fermi0_f()); 106 pbdma_formats_mp_fermi0_f());
107 107
108 gk20a_mem_wr32(g, mem, ram_fc_pb_header_w(), 108 nvgpu_mem_wr32(g, mem, ram_fc_pb_header_w(),
109 pbdma_pb_header_priv_user_f() | 109 pbdma_pb_header_priv_user_f() |
110 pbdma_pb_header_method_zero_f() | 110 pbdma_pb_header_method_zero_f() |
111 pbdma_pb_header_subchannel_zero_f() | 111 pbdma_pb_header_subchannel_zero_f() |
@@ -113,17 +113,17 @@ static int channel_gp10b_setup_ramfc(struct channel_gk20a *c,
113 pbdma_pb_header_first_true_f() | 113 pbdma_pb_header_first_true_f() |
114 pbdma_pb_header_type_inc_f()); 114 pbdma_pb_header_type_inc_f());
115 115
116 gk20a_mem_wr32(g, mem, ram_fc_subdevice_w(), 116 nvgpu_mem_wr32(g, mem, ram_fc_subdevice_w(),
117 pbdma_subdevice_id_f(1) | 117 pbdma_subdevice_id_f(1) |
118 pbdma_subdevice_status_active_f() | 118 pbdma_subdevice_status_active_f() |
119 pbdma_subdevice_channel_dma_enable_f()); 119 pbdma_subdevice_channel_dma_enable_f());
120 120
121 gk20a_mem_wr32(g, mem, ram_fc_target_w(), pbdma_target_engine_sw_f()); 121 nvgpu_mem_wr32(g, mem, ram_fc_target_w(), pbdma_target_engine_sw_f());
122 122
123 gk20a_mem_wr32(g, mem, ram_fc_acquire_w(), 123 nvgpu_mem_wr32(g, mem, ram_fc_acquire_w(),
124 g->ops.fifo.pbdma_acquire_val(acquire_timeout)); 124 g->ops.fifo.pbdma_acquire_val(acquire_timeout));
125 125
126 gk20a_mem_wr32(g, mem, ram_fc_runlist_timeslice_w(), 126 nvgpu_mem_wr32(g, mem, ram_fc_runlist_timeslice_w(),
127 pbdma_runlist_timeslice_timeout_128_f() | 127 pbdma_runlist_timeslice_timeout_128_f() |
128 pbdma_runlist_timeslice_timescale_3_f() | 128 pbdma_runlist_timeslice_timescale_3_f() |
129 pbdma_runlist_timeslice_enable_true_f()); 129 pbdma_runlist_timeslice_enable_true_f());
@@ -132,11 +132,11 @@ static int channel_gp10b_setup_ramfc(struct channel_gk20a *c,
132 gp10b_set_pdb_fault_replay_flags(c->g, mem); 132 gp10b_set_pdb_fault_replay_flags(c->g, mem);
133 133
134 134
135 gk20a_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid)); 135 nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));
136 136
137 if (c->is_privileged_channel) { 137 if (c->is_privileged_channel) {
138 /* Set privilege level for channel */ 138 /* Set privilege level for channel */
139 gk20a_mem_wr32(g, mem, ram_fc_config_w(), 139 nvgpu_mem_wr32(g, mem, ram_fc_config_w(),
140 pbdma_config_auth_level_privileged_f()); 140 pbdma_config_auth_level_privileged_f());
141 141
142 gk20a_fifo_setup_ramfc_for_privileged_channel(c); 142 gk20a_fifo_setup_ramfc_for_privileged_channel(c);
@@ -158,7 +158,7 @@ static int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c)
158 158
159 gk20a_dbg_fn(""); 159 gk20a_dbg_fn("");
160 160
161 v = gk20a_mem_rd32(c->g, &c->inst_block, 161 v = nvgpu_mem_rd32(c->g, &c->inst_block,
162 ram_fc_allowed_syncpoints_w()); 162 ram_fc_allowed_syncpoints_w());
163 old_syncpt = pbdma_allowed_syncpoints_0_index_v(v); 163 old_syncpt = pbdma_allowed_syncpoints_0_index_v(v);
164 if (c->sync) 164 if (c->sync)
@@ -178,7 +178,7 @@ static int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c)
178 178
179 v |= pbdma_allowed_syncpoints_0_index_f(new_syncpt); 179 v |= pbdma_allowed_syncpoints_0_index_f(new_syncpt);
180 180
181 gk20a_mem_wr32(c->g, &c->inst_block, 181 nvgpu_mem_wr32(c->g, &c->inst_block,
182 ram_fc_allowed_syncpoints_w(), v); 182 ram_fc_allowed_syncpoints_w(), v);
183 } 183 }
184 184
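channel_gp10b_setup_ramfc() above combines the renamed helpers: nvgpu_memset() clears the RAMFC region of the instance block, and a series of nvgpu_mem_wr32() calls then programs individual words. A compressed sketch of that setup flow, showing only a couple of the fields from the hunk (the wrapper itself is illustrative):

static void example_setup_ramfc_minimal(struct gk20a *g, struct mem_desc *mem,
                                        u64 gpfifo_base)
{
        /* zero the RAMFC area: byte offset 0, fill value 0, size in bytes */
        nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v());

        nvgpu_mem_wr32(g, mem, ram_fc_gp_base_w(),
                pbdma_gp_base_offset_f(
                        u64_lo32(gpfifo_base >> pbdma_gp_base_rsvd_s())));

        nvgpu_mem_wr32(g, mem, ram_fc_target_w(),
                pbdma_target_engine_sw_f());
}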
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
index 95590e40..fc831e75 100644
--- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
@@ -1039,51 +1039,51 @@ static void dump_ctx_switch_stats(struct gk20a *g, struct vm_gk20a *vm,
1039{ 1039{
1040 struct mem_desc *mem = &gr_ctx->mem; 1040 struct mem_desc *mem = &gr_ctx->mem;
1041 1041
1042 if (gk20a_mem_begin(g, mem)) { 1042 if (nvgpu_mem_begin(g, mem)) {
1043 WARN_ON("Cannot map context"); 1043 WARN_ON("Cannot map context");
1044 return; 1044 return;
1045 } 1045 }
1046 gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_magic_value_o : %x (expect %x)\n", 1046 gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_magic_value_o : %x (expect %x)\n",
1047 gk20a_mem_rd(g, mem, 1047 nvgpu_mem_rd(g, mem,
1048 ctxsw_prog_main_image_magic_value_o()), 1048 ctxsw_prog_main_image_magic_value_o()),
1049 ctxsw_prog_main_image_magic_value_v_value_v()); 1049 ctxsw_prog_main_image_magic_value_v_value_v());
1050 1050
1051 gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi : %x\n", 1051 gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi : %x\n",
1052 gk20a_mem_rd(g, mem, 1052 nvgpu_mem_rd(g, mem,
1053 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_o())); 1053 ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_o()));
1054 1054
1055 gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_ptr : %x\n", 1055 gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_ptr : %x\n",
1056 gk20a_mem_rd(g, mem, 1056 nvgpu_mem_rd(g, mem,
1057 ctxsw_prog_main_image_context_timestamp_buffer_ptr_o())); 1057 ctxsw_prog_main_image_context_timestamp_buffer_ptr_o()));
1058 1058
1059 gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_control : %x\n", 1059 gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_control : %x\n",
1060 gk20a_mem_rd(g, mem, 1060 nvgpu_mem_rd(g, mem,
1061 ctxsw_prog_main_image_context_timestamp_buffer_control_o())); 1061 ctxsw_prog_main_image_context_timestamp_buffer_control_o()));
1062 1062
1063 gk20a_err(dev_from_gk20a(g), "NUM_SAVE_OPERATIONS : %d\n", 1063 gk20a_err(dev_from_gk20a(g), "NUM_SAVE_OPERATIONS : %d\n",
1064 gk20a_mem_rd(g, mem, 1064 nvgpu_mem_rd(g, mem,
1065 ctxsw_prog_main_image_num_save_ops_o())); 1065 ctxsw_prog_main_image_num_save_ops_o()));
1066 gk20a_err(dev_from_gk20a(g), "WFI_SAVE_OPERATIONS : %d\n", 1066 gk20a_err(dev_from_gk20a(g), "WFI_SAVE_OPERATIONS : %d\n",
1067 gk20a_mem_rd(g, mem, 1067 nvgpu_mem_rd(g, mem,
1068 ctxsw_prog_main_image_num_wfi_save_ops_o())); 1068 ctxsw_prog_main_image_num_wfi_save_ops_o()));
1069 gk20a_err(dev_from_gk20a(g), "CTA_SAVE_OPERATIONS : %d\n", 1069 gk20a_err(dev_from_gk20a(g), "CTA_SAVE_OPERATIONS : %d\n",
1070 gk20a_mem_rd(g, mem, 1070 nvgpu_mem_rd(g, mem,
1071 ctxsw_prog_main_image_num_cta_save_ops_o())); 1071 ctxsw_prog_main_image_num_cta_save_ops_o()));
1072 gk20a_err(dev_from_gk20a(g), "GFXP_SAVE_OPERATIONS : %d\n", 1072 gk20a_err(dev_from_gk20a(g), "GFXP_SAVE_OPERATIONS : %d\n",
1073 gk20a_mem_rd(g, mem, 1073 nvgpu_mem_rd(g, mem,
1074 ctxsw_prog_main_image_num_gfxp_save_ops_o())); 1074 ctxsw_prog_main_image_num_gfxp_save_ops_o()));
1075 gk20a_err(dev_from_gk20a(g), "CILP_SAVE_OPERATIONS : %d\n", 1075 gk20a_err(dev_from_gk20a(g), "CILP_SAVE_OPERATIONS : %d\n",
1076 gk20a_mem_rd(g, mem, 1076 nvgpu_mem_rd(g, mem,
1077 ctxsw_prog_main_image_num_cilp_save_ops_o())); 1077 ctxsw_prog_main_image_num_cilp_save_ops_o()));
1078 gk20a_err(dev_from_gk20a(g), 1078 gk20a_err(dev_from_gk20a(g),
1079 "image gfx preemption option (GFXP is 1) %x\n", 1079 "image gfx preemption option (GFXP is 1) %x\n",
1080 gk20a_mem_rd(g, mem, 1080 nvgpu_mem_rd(g, mem,
1081 ctxsw_prog_main_image_graphics_preemption_options_o())); 1081 ctxsw_prog_main_image_graphics_preemption_options_o()));
1082 gk20a_err(dev_from_gk20a(g), 1082 gk20a_err(dev_from_gk20a(g),
1083 "image compute preemption option (CTA is 1) %x\n", 1083 "image compute preemption option (CTA is 1) %x\n",
1084 gk20a_mem_rd(g, mem, 1084 nvgpu_mem_rd(g, mem,
1085 ctxsw_prog_main_image_compute_preemption_options_o())); 1085 ctxsw_prog_main_image_compute_preemption_options_o()));
1086 gk20a_mem_end(g, mem); 1086 nvgpu_mem_end(g, mem);
1087} 1087}
1088 1088
1089static void gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm, 1089static void gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
@@ -1123,21 +1123,21 @@ static void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
1123 1123
1124 if (gr_ctx->graphics_preempt_mode == NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP) { 1124 if (gr_ctx->graphics_preempt_mode == NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP) {
1125 gk20a_dbg_info("GfxP: %x", gfxp_preempt_option); 1125 gk20a_dbg_info("GfxP: %x", gfxp_preempt_option);
1126 gk20a_mem_wr(g, mem, 1126 nvgpu_mem_wr(g, mem,
1127 ctxsw_prog_main_image_graphics_preemption_options_o(), 1127 ctxsw_prog_main_image_graphics_preemption_options_o(),
1128 gfxp_preempt_option); 1128 gfxp_preempt_option);
1129 } 1129 }
1130 1130
1131 if (gr_ctx->compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CILP) { 1131 if (gr_ctx->compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CILP) {
1132 gk20a_dbg_info("CILP: %x", cilp_preempt_option); 1132 gk20a_dbg_info("CILP: %x", cilp_preempt_option);
1133 gk20a_mem_wr(g, mem, 1133 nvgpu_mem_wr(g, mem,
1134 ctxsw_prog_main_image_compute_preemption_options_o(), 1134 ctxsw_prog_main_image_compute_preemption_options_o(),
1135 cilp_preempt_option); 1135 cilp_preempt_option);
1136 } 1136 }
1137 1137
1138 if (gr_ctx->compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CTA) { 1138 if (gr_ctx->compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CTA) {
1139 gk20a_dbg_info("CTA: %x", cta_preempt_option); 1139 gk20a_dbg_info("CTA: %x", cta_preempt_option);
1140 gk20a_mem_wr(g, mem, 1140 nvgpu_mem_wr(g, mem,
1141 ctxsw_prog_main_image_compute_preemption_options_o(), 1141 ctxsw_prog_main_image_compute_preemption_options_o(),
1142 cta_preempt_option); 1142 cta_preempt_option);
1143 } 1143 }
@@ -1147,7 +1147,7 @@ static void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
1147 u32 size; 1147 u32 size;
1148 u32 cbes_reserve; 1148 u32 cbes_reserve;
1149 1149
1150 gk20a_mem_wr(g, mem, 1150 nvgpu_mem_wr(g, mem,
1151 ctxsw_prog_main_image_full_preemption_ptr_o(), 1151 ctxsw_prog_main_image_full_preemption_ptr_o(),
1152 gr_ctx->t18x.preempt_ctxsw_buffer.gpu_va >> 8); 1152 gr_ctx->t18x.preempt_ctxsw_buffer.gpu_va >> 8);
1153 1153
@@ -2077,7 +2077,7 @@ static int gr_gp10b_set_boosted_ctx(struct channel_gk20a *ch,
2077 2077
2078 gr_ctx->boosted_ctx = boost; 2078 gr_ctx->boosted_ctx = boost;
2079 2079
2080 if (gk20a_mem_begin(g, mem)) 2080 if (nvgpu_mem_begin(g, mem))
2081 return -ENOMEM; 2081 return -ENOMEM;
2082 2082
2083 err = gk20a_disable_channel_tsg(g, ch); 2083 err = gk20a_disable_channel_tsg(g, ch);
@@ -2096,7 +2096,7 @@ static int gr_gp10b_set_boosted_ctx(struct channel_gk20a *ch,
2096enable_ch: 2096enable_ch:
2097 gk20a_enable_channel_tsg(g, ch); 2097 gk20a_enable_channel_tsg(g, ch);
2098unmap_ctx: 2098unmap_ctx:
2099 gk20a_mem_end(g, mem); 2099 nvgpu_mem_end(g, mem);
2100 2100
2101 return err; 2101 return err;
2102} 2102}
@@ -2107,7 +2107,7 @@ static void gr_gp10b_update_boosted_ctx(struct gk20a *g, struct mem_desc *mem,
2107 2107
2108 v = ctxsw_prog_main_image_pmu_options_boost_clock_frequencies_f( 2108 v = ctxsw_prog_main_image_pmu_options_boost_clock_frequencies_f(
2109 gr_ctx->boosted_ctx); 2109 gr_ctx->boosted_ctx);
2110 gk20a_mem_wr(g, mem, ctxsw_prog_main_image_pmu_options_o(), v); 2110 nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_pmu_options_o(), v);
2111} 2111}
2112 2112
2113static int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch, 2113static int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
@@ -2164,7 +2164,7 @@ static int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
2164 } 2164 }
2165 } 2165 }
2166 2166
2167 if (gk20a_mem_begin(g, mem)) 2167 if (nvgpu_mem_begin(g, mem))
2168 return -ENOMEM; 2168 return -ENOMEM;
2169 2169
2170 err = gk20a_disable_channel_tsg(g, ch); 2170 err = gk20a_disable_channel_tsg(g, ch);
@@ -2191,7 +2191,7 @@ static int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
2191enable_ch: 2191enable_ch:
2192 gk20a_enable_channel_tsg(g, ch); 2192 gk20a_enable_channel_tsg(g, ch);
2193unmap_ctx: 2193unmap_ctx:
2194 gk20a_mem_end(g, mem); 2194 nvgpu_mem_end(g, mem);
2195 2195
2196 return err; 2196 return err;
2197} 2197}
diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
index a5322bad..8c6340f0 100644
--- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
@@ -107,7 +107,7 @@ static int gb10b_init_bar2_mm_hw_setup(struct gk20a *g)
107 gk20a_dbg_info("bar2 inst block ptr: 0x%08x", (u32)inst_pa); 107 gk20a_dbg_info("bar2 inst block ptr: 0x%08x", (u32)inst_pa);
108 108
109 gk20a_writel(g, bus_bar2_block_r(), 109 gk20a_writel(g, bus_bar2_block_r(),
110 gk20a_aperture_mask(g, inst_block, 110 nvgpu_aperture_mask(g, inst_block,
111 bus_bar2_block_target_sys_mem_ncoh_f(), 111 bus_bar2_block_target_sys_mem_ncoh_f(),
112 bus_bar2_block_target_vid_mem_f()) | 112 bus_bar2_block_target_vid_mem_f()) |
113 bus_bar2_block_mode_virtual_f() | 113 bus_bar2_block_mode_virtual_f() |
@@ -162,7 +162,7 @@ static int update_gmmu_pde3_locked(struct vm_gk20a *vm,
162 u32 kind_v, u64 *ctag, 162 u32 kind_v, u64 *ctag,
163 bool cacheable, bool unmapped_pte, 163 bool cacheable, bool unmapped_pte,
164 int rw_flag, bool sparse, bool priv, 164 int rw_flag, bool sparse, bool priv,
165 enum gk20a_aperture aperture) 165 enum nvgpu_aperture aperture)
166{ 166{
167 struct gk20a *g = gk20a_from_vm(vm); 167 struct gk20a *g = gk20a_from_vm(vm);
168 u64 pte_addr = 0; 168 u64 pte_addr = 0;
@@ -174,7 +174,7 @@ static int update_gmmu_pde3_locked(struct vm_gk20a *vm,
174 174
175 pte_addr = gk20a_pde_addr(g, pte) >> gmmu_new_pde_address_shift_v(); 175 pte_addr = gk20a_pde_addr(g, pte) >> gmmu_new_pde_address_shift_v();
176 176
177 pde_v[0] |= gk20a_aperture_mask(g, &pte->mem, 177 pde_v[0] |= nvgpu_aperture_mask(g, &pte->mem,
178 gmmu_new_pde_aperture_sys_mem_ncoh_f(), 178 gmmu_new_pde_aperture_sys_mem_ncoh_f(),
179 gmmu_new_pde_aperture_video_memory_f()); 179 gmmu_new_pde_aperture_video_memory_f());
180 pde_v[0] |= gmmu_new_pde_address_sys_f(u64_lo32(pte_addr)); 180 pde_v[0] |= gmmu_new_pde_address_sys_f(u64_lo32(pte_addr));
@@ -205,7 +205,7 @@ static int update_gmmu_pde0_locked(struct vm_gk20a *vm,
205 u32 kind_v, u64 *ctag, 205 u32 kind_v, u64 *ctag,
206 bool cacheable, bool unmapped_pte, 206 bool cacheable, bool unmapped_pte,
207 int rw_flag, bool sparse, bool priv, 207 int rw_flag, bool sparse, bool priv,
208 enum gk20a_aperture aperture) 208 enum nvgpu_aperture aperture)
209{ 209{
210 struct gk20a *g = gk20a_from_vm(vm); 210 struct gk20a *g = gk20a_from_vm(vm);
211 bool small_valid, big_valid; 211 bool small_valid, big_valid;
@@ -230,7 +230,7 @@ static int update_gmmu_pde0_locked(struct vm_gk20a *vm,
230 230
231 if (small_valid) { 231 if (small_valid) {
232 pde_v[2] |= gmmu_new_dual_pde_address_small_sys_f(pte_addr_small); 232 pde_v[2] |= gmmu_new_dual_pde_address_small_sys_f(pte_addr_small);
233 pde_v[2] |= gk20a_aperture_mask(g, &entry->mem, 233 pde_v[2] |= nvgpu_aperture_mask(g, &entry->mem,
234 gmmu_new_dual_pde_aperture_small_sys_mem_ncoh_f(), 234 gmmu_new_dual_pde_aperture_small_sys_mem_ncoh_f(),
235 gmmu_new_dual_pde_aperture_small_video_memory_f()); 235 gmmu_new_dual_pde_aperture_small_video_memory_f());
236 pde_v[2] |= gmmu_new_dual_pde_vol_small_true_f(); 236 pde_v[2] |= gmmu_new_dual_pde_vol_small_true_f();
@@ -240,7 +240,7 @@ static int update_gmmu_pde0_locked(struct vm_gk20a *vm,
240 if (big_valid) { 240 if (big_valid) {
241 pde_v[0] |= gmmu_new_dual_pde_address_big_sys_f(pte_addr_big); 241 pde_v[0] |= gmmu_new_dual_pde_address_big_sys_f(pte_addr_big);
242 pde_v[0] |= gmmu_new_dual_pde_vol_big_true_f(); 242 pde_v[0] |= gmmu_new_dual_pde_vol_big_true_f();
243 pde_v[0] |= gk20a_aperture_mask(g, &entry->mem, 243 pde_v[0] |= nvgpu_aperture_mask(g, &entry->mem,
244 gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f(), 244 gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f(),
245 gmmu_new_dual_pde_aperture_big_video_memory_f()); 245 gmmu_new_dual_pde_aperture_big_video_memory_f());
246 pde_v[1] |= pte_addr_big >> 28; 246 pde_v[1] |= pte_addr_big >> 28;
@@ -268,7 +268,7 @@ static int update_gmmu_pte_locked(struct vm_gk20a *vm,
268 u32 kind_v, u64 *ctag, 268 u32 kind_v, u64 *ctag,
269 bool cacheable, bool unmapped_pte, 269 bool cacheable, bool unmapped_pte,
270 int rw_flag, bool sparse, bool priv, 270 int rw_flag, bool sparse, bool priv,
271 enum gk20a_aperture aperture) 271 enum nvgpu_aperture aperture)
272{ 272{
273 struct gk20a *g = vm->mm->g; 273 struct gk20a *g = vm->mm->g;
274 u32 page_size = vm->gmmu_page_sizes[gmmu_pgsz_idx]; 274 u32 page_size = vm->gmmu_page_sizes[gmmu_pgsz_idx];
@@ -284,7 +284,7 @@ static int update_gmmu_pte_locked(struct vm_gk20a *vm,
284 u32 pte_addr = aperture == APERTURE_SYSMEM ? 284 u32 pte_addr = aperture == APERTURE_SYSMEM ?
285 gmmu_new_pte_address_sys_f(iova_v) : 285 gmmu_new_pte_address_sys_f(iova_v) :
286 gmmu_new_pte_address_vid_f(iova_v); 286 gmmu_new_pte_address_vid_f(iova_v);
287 u32 pte_tgt = __gk20a_aperture_mask(g, aperture, 287 u32 pte_tgt = __nvgpu_aperture_mask(g, aperture,
288 gmmu_new_pte_aperture_sys_mem_ncoh_f(), 288 gmmu_new_pte_aperture_sys_mem_ncoh_f(),
289 gmmu_new_pte_aperture_video_memory_f()); 289 gmmu_new_pte_aperture_video_memory_f());
290 290
@@ -384,15 +384,15 @@ static void gp10b_mm_init_pdb(struct gk20a *g, struct mem_desc *inst_block,
384 384
385 gk20a_dbg_info("pde pa=0x%llx", pdb_addr); 385 gk20a_dbg_info("pde pa=0x%llx", pdb_addr);
386 386
387 gk20a_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(), 387 nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(),
388 gk20a_aperture_mask(g, &vm->pdb.mem, 388 nvgpu_aperture_mask(g, &vm->pdb.mem,
389 ram_in_page_dir_base_target_sys_mem_ncoh_f(), 389 ram_in_page_dir_base_target_sys_mem_ncoh_f(),
390 ram_in_page_dir_base_target_vid_mem_f()) | 390 ram_in_page_dir_base_target_vid_mem_f()) |
391 ram_in_page_dir_base_vol_true_f() | 391 ram_in_page_dir_base_vol_true_f() |
392 ram_in_page_dir_base_lo_f(pdb_addr_lo) | 392 ram_in_page_dir_base_lo_f(pdb_addr_lo) |
393 1 << 10); 393 1 << 10);
394 394
395 gk20a_mem_wr32(g, inst_block, ram_in_page_dir_base_hi_w(), 395 nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_hi_w(),
396 ram_in_page_dir_base_hi_f(pdb_addr_hi)); 396 ram_in_page_dir_base_hi_f(pdb_addr_hi));
397} 397}
398 398
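The gp10b/mm_gp10b.c hunks above are a mechanical rename: each PDE/PTE word is still assembled by OR-ing an aperture field together with the address and attribute fields, and is still written through the word-indexed accessor; only the gk20a_ prefixes become nvgpu_. nvgpu_aperture_mask() picks between the two caller-supplied field values according to where the backing memory was actually allocated (APERTURE_SYSMEM vs APERTURE_VIDMEM), which is why &pte->mem, &entry->mem and &vm->pdb.mem are passed in above. The sketch below is not taken from the patch: example_write_pde() and its parameters are hypothetical, only the low PDE word is shown, and the nvgpu_* and gmmu_new_pde_* helpers are the ones visible in the hunks.

/*
 * Illustrative sketch, not part of this patch: how a caller assembles
 * and writes a (low) PDE word after the rename. example_write_pde()
 * and its arguments are hypothetical.
 */
static void example_write_pde(struct gk20a *g,
			      struct mem_desc *next_tbl, /* lower-level page table */
			      struct mem_desc *pd_mem,   /* page directory backing store */
			      u32 pd_w,                  /* word index of the PDE */
			      u64 pte_addr)              /* address, already shifted */
{
	u32 pde_lo = 0;

	/* Aperture bits describe where next_tbl actually lives. */
	pde_lo |= nvgpu_aperture_mask(g, next_tbl,
			gmmu_new_pde_aperture_sys_mem_ncoh_f(),
			gmmu_new_pde_aperture_video_memory_f());
	pde_lo |= gmmu_new_pde_address_sys_f(u64_lo32(pte_addr));

	/* Word-indexed write through the renamed accessor. */
	nvgpu_mem_wr32(g, pd_mem, pd_w, pde_lo);
}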
diff --git a/drivers/gpu/nvgpu/include/nvgpu/mem_desc.h b/drivers/gpu/nvgpu/include/nvgpu/mem_desc.h
index 528fd7bc..42d8854a 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/mem_desc.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/mem_desc.h
@@ -28,11 +28,11 @@ struct gk20a;
28struct nvgpu_allocator; 28struct nvgpu_allocator;
29 29
30/* 30/*
31 * Real location of a buffer - gk20a_aperture_mask() will deduce what will be 31 * Real location of a buffer - nvgpu_aperture_mask() will deduce what will be
32 * told to the gpu about the aperture, but this flag designates where the 32 * told to the gpu about the aperture, but this flag designates where the
33 * memory actually was allocated from. 33 * memory actually was allocated from.
34 */ 34 */
35enum gk20a_aperture { 35enum nvgpu_aperture {
36 APERTURE_INVALID, /* unallocated or N/A */ 36 APERTURE_INVALID, /* unallocated or N/A */
37 APERTURE_SYSMEM, 37 APERTURE_SYSMEM,
38 APERTURE_VIDMEM 38 APERTURE_VIDMEM
@@ -42,7 +42,7 @@ struct mem_desc {
42 void *cpu_va; /* sysmem only */ 42 void *cpu_va; /* sysmem only */
43 struct page **pages; /* sysmem only */ 43 struct page **pages; /* sysmem only */
44 struct sg_table *sgt; 44 struct sg_table *sgt;
45 enum gk20a_aperture aperture; 45 enum nvgpu_aperture aperture;
46 size_t size; 46 size_t size;
47 u64 gpu_va; 47 u64 gpu_va;
48 bool fixed; /* vidmem only */ 48 bool fixed; /* vidmem only */
@@ -65,7 +65,7 @@ struct mem_desc_sub {
65 u32 size; 65 u32 size;
66}; 66};
67 67
68static inline const char *gk20a_aperture_str(enum gk20a_aperture aperture) 68static inline const char *nvgpu_aperture_str(enum nvgpu_aperture aperture)
69{ 69{
70 switch (aperture) { 70 switch (aperture) {
71 case APERTURE_INVALID: return "invalid"; 71 case APERTURE_INVALID: return "invalid";
@@ -80,32 +80,32 @@ static inline const char *gk20a_aperture_str(enum gk20a_aperture aperture)
80 * kernel mapping for this buffer. 80 * kernel mapping for this buffer.
81 */ 81 */
82 82
83int gk20a_mem_begin(struct gk20a *g, struct mem_desc *mem); 83int nvgpu_mem_begin(struct gk20a *g, struct mem_desc *mem);
84/* nop for null mem, like with free() or vunmap() */ 84/* nop for null mem, like with free() or vunmap() */
85void gk20a_mem_end(struct gk20a *g, struct mem_desc *mem); 85void nvgpu_mem_end(struct gk20a *g, struct mem_desc *mem);
86 86
87/* word-indexed offset */ 87/* word-indexed offset */
88u32 gk20a_mem_rd32(struct gk20a *g, struct mem_desc *mem, u32 w); 88u32 nvgpu_mem_rd32(struct gk20a *g, struct mem_desc *mem, u32 w);
89/* byte offset (32b-aligned) */ 89/* byte offset (32b-aligned) */
90u32 gk20a_mem_rd(struct gk20a *g, struct mem_desc *mem, u32 offset); 90u32 nvgpu_mem_rd(struct gk20a *g, struct mem_desc *mem, u32 offset);
91/* memcpy to cpu, offset and size in bytes (32b-aligned) */ 91/* memcpy to cpu, offset and size in bytes (32b-aligned) */
92void gk20a_mem_rd_n(struct gk20a *g, struct mem_desc *mem, u32 offset, 92void nvgpu_mem_rd_n(struct gk20a *g, struct mem_desc *mem, u32 offset,
93 void *dest, u32 size); 93 void *dest, u32 size);
94 94
95/* word-indexed offset */ 95/* word-indexed offset */
96void gk20a_mem_wr32(struct gk20a *g, struct mem_desc *mem, u32 w, u32 data); 96void nvgpu_mem_wr32(struct gk20a *g, struct mem_desc *mem, u32 w, u32 data);
97/* byte offset (32b-aligned) */ 97/* byte offset (32b-aligned) */
98void gk20a_mem_wr(struct gk20a *g, struct mem_desc *mem, u32 offset, u32 data); 98void nvgpu_mem_wr(struct gk20a *g, struct mem_desc *mem, u32 offset, u32 data);
99/* memcpy from cpu, offset and size in bytes (32b-aligned) */ 99/* memcpy from cpu, offset and size in bytes (32b-aligned) */
100void gk20a_mem_wr_n(struct gk20a *g, struct mem_desc *mem, u32 offset, 100void nvgpu_mem_wr_n(struct gk20a *g, struct mem_desc *mem, u32 offset,
101 void *src, u32 size); 101 void *src, u32 size);
102/* size and offset in bytes (32b-aligned), filled with the constant byte c */ 102/* size and offset in bytes (32b-aligned), filled with the constant byte c */
103void gk20a_memset(struct gk20a *g, struct mem_desc *mem, u32 offset, 103void nvgpu_memset(struct gk20a *g, struct mem_desc *mem, u32 offset,
104 u32 c, u32 size); 104 u32 c, u32 size);
105 105
106u32 __gk20a_aperture_mask(struct gk20a *g, enum gk20a_aperture aperture, 106u32 __nvgpu_aperture_mask(struct gk20a *g, enum nvgpu_aperture aperture,
107 u32 sysmem_mask, u32 vidmem_mask); 107 u32 sysmem_mask, u32 vidmem_mask);
108u32 gk20a_aperture_mask(struct gk20a *g, struct mem_desc *mem, 108u32 nvgpu_aperture_mask(struct gk20a *g, struct mem_desc *mem,
109 u32 sysmem_mask, u32 vidmem_mask); 109 u32 sysmem_mask, u32 vidmem_mask);
110 110
111#endif 111#endif
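mem_desc.h is the public face of the rename, so a short usage sketch may help: the nvgpu_mem_* accessors keep the old gk20a_mem_* calling conventions, with begin()/end() bracketing the CPU mapping, rd32()/wr32() taking word indices, and rd()/wr()/memset() taking 32-bit-aligned byte offsets. The sketch is not from the patch; example_probe_mem() is a hypothetical caller, and it assumes a non-zero return from nvgpu_mem_begin() means failure. Everything else follows the declarations above.

/*
 * Usage sketch, not part of this patch. Assumes a non-zero return from
 * nvgpu_mem_begin() indicates failure.
 */
static int example_probe_mem(struct gk20a *g, struct mem_desc *mem)
{
	u32 word0;

	if (nvgpu_mem_begin(g, mem))		/* set up a CPU mapping */
		return -ENOMEM;

	nvgpu_mem_wr32(g, mem, 0, 0xcafe0000);	/* word index 0 */
	nvgpu_mem_wr(g, mem, 4, 0xcafe0001);	/* byte offset 4 (32b-aligned) */
	nvgpu_memset(g, mem, 8, 0, 8);		/* fill 8 bytes at offset 8 with 0 */

	word0 = nvgpu_mem_rd32(g, mem, 0);	/* read word index 0 back */

	nvgpu_mem_end(g, mem);			/* drop the CPU mapping */

	return word0 == 0xcafe0000 ? 0 : -EIO;
}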
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
index 3d908b0d..2da18fb8 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
@@ -51,7 +51,7 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
51 bool sparse, 51 bool sparse,
52 bool priv, 52 bool priv,
53 struct vm_gk20a_mapping_batch *batch, 53 struct vm_gk20a_mapping_batch *batch,
54 enum gk20a_aperture aperture) 54 enum nvgpu_aperture aperture)
55{ 55{
56 int err = 0; 56 int err = 0;
57 struct device *d = dev_from_vm(vm); 57 struct device *d = dev_from_vm(vm);
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index 54ac36db..11fcf925 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -86,7 +86,7 @@ static u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
86 bool sparse, 86 bool sparse,
87 bool priv, 87 bool priv,
88 struct vm_gk20a_mapping_batch *batch, 88 struct vm_gk20a_mapping_batch *batch,
89 enum gk20a_aperture aperture) 89 enum nvgpu_aperture aperture)
90{ 90{
91 int err = 0; 91 int err = 0;
92 struct device *d = dev_from_vm(vm); 92 struct device *d = dev_from_vm(vm);