path: root/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
author	Alex Waterman <alexw@nvidia.com>	2017-03-15 19:42:12 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-04-06 21:14:48 -0400
commit	b69020bff5dfa69cad926c9374cdbe9a62509ffd (patch)
tree	222f6b6bc23561a38004a257cbac401e431ff3be /drivers/gpu/nvgpu/gk20a/gr_gk20a.c
parent	fa4ecf5730a75269e85cc41c2ad2ee61307e72a9 (diff)
gpu: nvgpu: Rename gk20a_mem_* functions
Rename the functions used for mem_desc access to nvgpu_mem_*.

JIRA NVGPU-12

Change-Id: Ibfdc1112d43f0a125e4487c250e3f977ffd2cd75
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1323325
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gr_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.c	190
1 file changed, 95 insertions(+), 95 deletions(-)
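The rename is mechanical: every CPU-side accessor for a struct mem_desc moves from the gk20a_* names (gk20a_mem_begin/rd/wr/end, gk20a_memset, gk20a_aperture_mask) to the corresponding nvgpu_* names, with arguments unchanged. As orientation for the diff below, here is a minimal sketch of the begin/rd/wr/end access pattern after the rename; it mirrors gr_gk20a_get_ctx_id() from the first hunk, and example_read_ctx_id is a hypothetical helper used only for illustration.

/*
 * Sketch only -- not part of this patch. Signatures are inferred from the
 * call sites in the diff below; error handling mirrors gr_gk20a_get_ctx_id().
 */
static int example_read_ctx_id(struct gk20a *g, struct mem_desc *mem, u32 *ctx_id)
{
	/* Map the buffer for CPU access (formerly gk20a_mem_begin()). */
	if (nvgpu_mem_begin(g, mem))
		return -ENOMEM;

	/* 32-bit read at a byte offset (formerly gk20a_mem_rd()). */
	*ctx_id = nvgpu_mem_rd(g, mem, ctxsw_prog_main_image_context_id_o());

	/* Release the CPU mapping (formerly gk20a_mem_end()). */
	nvgpu_mem_end(g, mem);

	return 0;
}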
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 3e9a388b..360b8c97 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -110,13 +110,13 @@ int gr_gk20a_get_ctx_id(struct gk20a *g,
 	   Flush and invalidate before cpu update. */
 	g->ops.mm.l2_flush(g, true);
 
-	if (gk20a_mem_begin(g, &ch_ctx->gr_ctx->mem))
+	if (nvgpu_mem_begin(g, &ch_ctx->gr_ctx->mem))
 		return -ENOMEM;
 
-	*ctx_id = gk20a_mem_rd(g, &ch_ctx->gr_ctx->mem,
+	*ctx_id = nvgpu_mem_rd(g, &ch_ctx->gr_ctx->mem,
 			ctxsw_prog_main_image_context_id_o());
 
-	gk20a_mem_end(g, &ch_ctx->gr_ctx->mem);
+	nvgpu_mem_end(g, &ch_ctx->gr_ctx->mem);
 
 	return 0;
 }
@@ -649,11 +649,11 @@ int gr_gk20a_commit_inst(struct channel_gk20a *c, u64 gpu_va)
 	addr_lo = u64_lo32(gpu_va) >> 12;
 	addr_hi = u64_hi32(gpu_va);
 
-	gk20a_mem_wr32(c->g, &c->inst_block, ram_in_gr_wfi_target_w(),
+	nvgpu_mem_wr32(c->g, &c->inst_block, ram_in_gr_wfi_target_w(),
 		 ram_in_gr_cs_wfi_f() | ram_in_gr_wfi_mode_virtual_f() |
 		 ram_in_gr_wfi_ptr_lo_f(addr_lo));
 
-	gk20a_mem_wr32(c->g, &c->inst_block, ram_in_gr_wfi_ptr_hi_w(),
+	nvgpu_mem_wr32(c->g, &c->inst_block, ram_in_gr_wfi_ptr_hi_w(),
 		 ram_in_gr_wfi_ptr_hi_f(addr_hi));
 
 	return 0;
@@ -670,16 +670,16 @@ int gr_gk20a_commit_inst(struct channel_gk20a *c, u64 gpu_va)
 int gr_gk20a_ctx_patch_write_begin(struct gk20a *g,
 					  struct channel_ctx_gk20a *ch_ctx)
 {
-	return gk20a_mem_begin(g, &ch_ctx->patch_ctx.mem);
+	return nvgpu_mem_begin(g, &ch_ctx->patch_ctx.mem);
 }
 
 void gr_gk20a_ctx_patch_write_end(struct gk20a *g,
 					 struct channel_ctx_gk20a *ch_ctx)
 {
-	gk20a_mem_end(g, &ch_ctx->patch_ctx.mem);
+	nvgpu_mem_end(g, &ch_ctx->patch_ctx.mem);
 	/* Write context count to context image if it is mapped */
 	if (ch_ctx->gr_ctx->mem.cpu_va) {
-		gk20a_mem_wr(g, &ch_ctx->gr_ctx->mem,
+		nvgpu_mem_wr(g, &ch_ctx->gr_ctx->mem,
 			     ctxsw_prog_main_image_patch_count_o(),
 			     ch_ctx->patch_ctx.data_count);
 	}
@@ -691,8 +691,8 @@ void gr_gk20a_ctx_patch_write(struct gk20a *g,
 {
 	if (patch) {
 		u32 patch_slot = ch_ctx->patch_ctx.data_count * 2;
-		gk20a_mem_wr32(g, &ch_ctx->patch_ctx.mem, patch_slot, addr);
-		gk20a_mem_wr32(g, &ch_ctx->patch_ctx.mem, patch_slot + 1, data);
+		nvgpu_mem_wr32(g, &ch_ctx->patch_ctx.mem, patch_slot, addr);
+		nvgpu_mem_wr32(g, &ch_ctx->patch_ctx.mem, patch_slot + 1, data);
 		ch_ctx->patch_ctx.data_count++;
 	} else {
 		gk20a_writel(g, addr, data);
@@ -703,7 +703,7 @@ static u32 fecs_current_ctx_data(struct gk20a *g, struct mem_desc *inst_block)
 {
 	u32 ptr = u64_lo32(gk20a_mm_inst_block_addr(g, inst_block)
 			   >> ram_in_base_shift_v());
-	u32 aperture = gk20a_aperture_mask(g, inst_block,
+	u32 aperture = nvgpu_aperture_mask(g, inst_block,
 				gr_fecs_current_ctx_target_sys_mem_ncoh_f(),
 				gr_fecs_current_ctx_target_vid_mem_f());
 
@@ -745,7 +745,7 @@ void gr_gk20a_write_zcull_ptr(struct gk20a *g,
 {
 	u32 va = u64_lo32(gpu_va >> 8);
 
-	gk20a_mem_wr(g, mem,
+	nvgpu_mem_wr(g, mem,
 		ctxsw_prog_main_image_zcull_ptr_o(), va);
 }
 
@@ -754,7 +754,7 @@ void gr_gk20a_write_pm_ptr(struct gk20a *g,
 {
 	u32 va = u64_lo32(gpu_va >> 8);
 
-	gk20a_mem_wr(g, mem,
+	nvgpu_mem_wr(g, mem,
 		ctxsw_prog_main_image_pm_ptr_o(), va);
 }
 
@@ -768,10 +768,10 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c)
 
 	gk20a_dbg_fn("");
 
-	if (gk20a_mem_begin(g, mem))
+	if (nvgpu_mem_begin(g, mem))
 		return -ENOMEM;
 
-	if (gk20a_mem_begin(g, ctxheader)) {
+	if (nvgpu_mem_begin(g, ctxheader)) {
 		ret = -ENOMEM;
 		goto clean_up_mem;
 	}
@@ -795,7 +795,7 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c)
 		goto clean_up;
 	}
 
-	gk20a_mem_wr(g, mem,
+	nvgpu_mem_wr(g, mem,
 		 ctxsw_prog_main_image_zcull_o(),
 		 ch_ctx->zcull_ctx.ctx_sw_mode);
 
@@ -808,9 +808,9 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c)
 	gk20a_enable_channel_tsg(g, c);
 
 clean_up:
-	gk20a_mem_end(g, ctxheader);
+	nvgpu_mem_end(g, ctxheader);
 clean_up_mem:
-	gk20a_mem_end(g, mem);
+	nvgpu_mem_end(g, mem);
 
 	return ret;
 }
@@ -1756,10 +1756,10 @@ restore_fe_go_idle:
 		goto restore_fe_go_idle;
 	}
 
-	if (gk20a_mem_begin(g, gold_mem))
+	if (nvgpu_mem_begin(g, gold_mem))
 		goto clean_up;
 
-	if (gk20a_mem_begin(g, gr_mem))
+	if (nvgpu_mem_begin(g, gr_mem))
 		goto clean_up;
 
 	ctx_header_words = roundup(ctx_header_bytes, sizeof(u32));
@@ -1768,26 +1768,26 @@ restore_fe_go_idle:
 	g->ops.mm.l2_flush(g, true);
 
 	for (i = 0; i < ctx_header_words; i++) {
-		data = gk20a_mem_rd32(g, gr_mem, i);
-		gk20a_mem_wr32(g, gold_mem, i, data);
+		data = nvgpu_mem_rd32(g, gr_mem, i);
+		nvgpu_mem_wr32(g, gold_mem, i, data);
 	}
-	gk20a_mem_wr(g, gold_mem, ctxsw_prog_main_image_zcull_o(),
+	nvgpu_mem_wr(g, gold_mem, ctxsw_prog_main_image_zcull_o(),
 		 ctxsw_prog_main_image_zcull_mode_no_ctxsw_v());
 
-	if (gk20a_mem_begin(g, ctxheader))
+	if (nvgpu_mem_begin(g, ctxheader))
 		goto clean_up;
 
 	if (ctxheader->gpu_va)
 		g->ops.gr.write_zcull_ptr(g, ctxheader, 0);
 	else
 		g->ops.gr.write_zcull_ptr(g, gold_mem, 0);
-	gk20a_mem_end(g, ctxheader);
+	nvgpu_mem_end(g, ctxheader);
 
 	g->ops.gr.commit_inst(c, ch_ctx->global_ctx_buffer_va[GOLDEN_CTX_VA]);
 
 	gr_gk20a_fecs_ctx_image_save(c, gr_fecs_method_push_adr_wfi_golden_save_v());
 
-	if (gk20a_mem_begin(g, ctxheader))
+	if (nvgpu_mem_begin(g, ctxheader))
 		goto clean_up;
 
 	if (gr->ctx_vars.local_golden_image == NULL) {
@@ -1801,15 +1801,15 @@ restore_fe_go_idle:
 		}
 
 		if (ctxheader->gpu_va)
-			gk20a_mem_rd_n(g, ctxheader, 0,
+			nvgpu_mem_rd_n(g, ctxheader, 0,
 				gr->ctx_vars.local_golden_image,
 				gr->ctx_vars.golden_image_size);
 		else
-			gk20a_mem_rd_n(g, gold_mem, 0,
+			nvgpu_mem_rd_n(g, gold_mem, 0,
 				gr->ctx_vars.local_golden_image,
 				gr->ctx_vars.golden_image_size);
 	}
-	gk20a_mem_end(g, ctxheader);
+	nvgpu_mem_end(g, ctxheader);
 
 	g->ops.gr.commit_inst(c, gr_mem->gpu_va);
 
@@ -1824,8 +1824,8 @@ clean_up:
 	else
 		gk20a_dbg_fn("done");
 
-	gk20a_mem_end(g, gold_mem);
-	gk20a_mem_end(g, gr_mem);
+	nvgpu_mem_end(g, gold_mem);
+	nvgpu_mem_end(g, gr_mem);
 
 	nvgpu_mutex_release(&gr->ctx_mutex);
 	return err;
@@ -1865,22 +1865,22 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
 	   Flush and invalidate before cpu update. */
 	g->ops.mm.l2_flush(g, true);
 
-	if (gk20a_mem_begin(g, mem)) {
+	if (nvgpu_mem_begin(g, mem)) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	data = gk20a_mem_rd(g, mem,
+	data = nvgpu_mem_rd(g, mem,
 		ctxsw_prog_main_image_pm_o());
 	data = data & ~ctxsw_prog_main_image_pm_smpc_mode_m();
 	data |= enable_smpc_ctxsw ?
 		ctxsw_prog_main_image_pm_smpc_mode_ctxsw_f() :
 		ctxsw_prog_main_image_pm_smpc_mode_no_ctxsw_f();
-	gk20a_mem_wr(g, mem,
+	nvgpu_mem_wr(g, mem,
 		ctxsw_prog_main_image_pm_o(),
 		data);
 
-	gk20a_mem_end(g, mem);
+	nvgpu_mem_end(g, mem);
 
 out:
 	gk20a_enable_channel_tsg(g, c);
@@ -1964,27 +1964,27 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 		}
 
 		/* Now clear the buffer */
-		if (gk20a_mem_begin(g, &pm_ctx->mem)) {
+		if (nvgpu_mem_begin(g, &pm_ctx->mem)) {
 			ret = -ENOMEM;
 			goto cleanup_pm_buf;
 		}
 
-		gk20a_memset(g, &pm_ctx->mem, 0, 0, pm_ctx->mem.size);
+		nvgpu_memset(g, &pm_ctx->mem, 0, 0, pm_ctx->mem.size);
 
-		gk20a_mem_end(g, &pm_ctx->mem);
+		nvgpu_mem_end(g, &pm_ctx->mem);
 	}
 
-	if (gk20a_mem_begin(g, gr_mem)) {
+	if (nvgpu_mem_begin(g, gr_mem)) {
 		ret = -ENOMEM;
 		goto cleanup_pm_buf;
 	}
 
-	if (gk20a_mem_begin(g, ctxheader)) {
+	if (nvgpu_mem_begin(g, ctxheader)) {
 		ret = -ENOMEM;
 		goto clean_up_mem;
 	}
 
-	data = gk20a_mem_rd(g, gr_mem, ctxsw_prog_main_image_pm_o());
+	data = nvgpu_mem_rd(g, gr_mem, ctxsw_prog_main_image_pm_o());
 	data = data & ~ctxsw_prog_main_image_pm_mode_m();
 
 	if (enable_hwpm_ctxsw) {
@@ -1998,22 +1998,22 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 
 	data |= pm_ctx->pm_mode;
 
-	gk20a_mem_wr(g, gr_mem, ctxsw_prog_main_image_pm_o(), data);
+	nvgpu_mem_wr(g, gr_mem, ctxsw_prog_main_image_pm_o(), data);
 
 	if (ctxheader->gpu_va)
 		g->ops.gr.write_pm_ptr(g, ctxheader, virt_addr);
 	else
 		g->ops.gr.write_pm_ptr(g, gr_mem, virt_addr);
 
-	gk20a_mem_end(g, ctxheader);
-	gk20a_mem_end(g, gr_mem);
+	nvgpu_mem_end(g, ctxheader);
+	nvgpu_mem_end(g, gr_mem);
 
 	/* enable channel */
 	gk20a_enable_channel_tsg(g, c);
 
 	return 0;
 clean_up_mem:
-	gk20a_mem_end(g, gr_mem);
+	nvgpu_mem_end(g, gr_mem);
 cleanup_pm_buf:
 	gk20a_gmmu_unmap(c->vm, pm_ctx->mem.gpu_va, pm_ctx->mem.size,
 			gk20a_mem_flag_none);
@@ -2048,10 +2048,10 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 	   Flush and invalidate before cpu update. */
 	g->ops.mm.l2_flush(g, true);
 
-	if (gk20a_mem_begin(g, mem))
+	if (nvgpu_mem_begin(g, mem))
 		return -ENOMEM;
 
-	if (gk20a_mem_begin(g, ctxheader)) {
+	if (nvgpu_mem_begin(g, ctxheader)) {
 		ret = -ENOMEM;
 		goto clean_up_mem;
 	}
@@ -2060,12 +2060,12 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 		if (g->ops.gr.restore_context_header)
 			g->ops.gr.restore_context_header(g, ctxheader);
 	} else {
-		gk20a_mem_wr_n(g, mem, 0,
+		nvgpu_mem_wr_n(g, mem, 0,
 			gr->ctx_vars.local_golden_image,
 			gr->ctx_vars.golden_image_size);
-		gk20a_mem_wr(g, mem,
+		nvgpu_mem_wr(g, mem,
 			ctxsw_prog_main_image_num_save_ops_o(), 0);
-		gk20a_mem_wr(g, mem,
+		nvgpu_mem_wr(g, mem,
 			ctxsw_prog_main_image_num_restore_ops_o(), 0);
 	}
 
@@ -2083,29 +2083,29 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 	else
 		data = ctxsw_prog_main_image_priv_access_map_config_mode_use_map_f();
 
-	gk20a_mem_wr(g, mem, ctxsw_prog_main_image_priv_access_map_config_o(),
+	nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_priv_access_map_config_o(),
 		 data);
 
 	if (ctxheader->gpu_va) {
-		gk20a_mem_wr(g, ctxheader,
+		nvgpu_mem_wr(g, ctxheader,
 			ctxsw_prog_main_image_priv_access_map_addr_lo_o(),
 			virt_addr_lo);
-		gk20a_mem_wr(g, ctxheader,
+		nvgpu_mem_wr(g, ctxheader,
 			ctxsw_prog_main_image_priv_access_map_addr_hi_o(),
 			virt_addr_hi);
 	} else {
-		gk20a_mem_wr(g, mem,
+		nvgpu_mem_wr(g, mem,
 			ctxsw_prog_main_image_priv_access_map_addr_lo_o(),
 			virt_addr_lo);
-		gk20a_mem_wr(g, mem,
+		nvgpu_mem_wr(g, mem,
 			ctxsw_prog_main_image_priv_access_map_addr_hi_o(),
 			virt_addr_hi);
 	}
 	/* disable verif features */
-	v = gk20a_mem_rd(g, mem, ctxsw_prog_main_image_misc_options_o());
+	v = nvgpu_mem_rd(g, mem, ctxsw_prog_main_image_misc_options_o());
 	v = v & ~(ctxsw_prog_main_image_misc_options_verif_features_m());
 	v = v | ctxsw_prog_main_image_misc_options_verif_features_disabled_f();
-	gk20a_mem_wr(g, mem, ctxsw_prog_main_image_misc_options_o(), v);
+	nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_misc_options_o(), v);
 
 	if (g->ops.gr.update_ctxsw_preemption_mode)
 		g->ops.gr.update_ctxsw_preemption_mode(g, ch_ctx, mem);
@@ -2116,26 +2116,26 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 	virt_addr_lo = u64_lo32(ch_ctx->patch_ctx.mem.gpu_va);
 	virt_addr_hi = u64_hi32(ch_ctx->patch_ctx.mem.gpu_va);
 
-	gk20a_mem_wr(g, mem, ctxsw_prog_main_image_patch_count_o(),
+	nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_patch_count_o(),
 		 ch_ctx->patch_ctx.data_count);
-	gk20a_mem_wr(g, mem, ctxsw_prog_main_image_patch_adr_lo_o(),
+	nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_patch_adr_lo_o(),
 		 virt_addr_lo);
-	gk20a_mem_wr(g, mem, ctxsw_prog_main_image_patch_adr_hi_o(),
+	nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_patch_adr_hi_o(),
 		 virt_addr_hi);
 
 	if (ctxheader->gpu_va) {
-		gk20a_mem_wr(g, ctxheader,
+		nvgpu_mem_wr(g, ctxheader,
 			ctxsw_prog_main_image_patch_count_o(),
 			ch_ctx->patch_ctx.data_count);
-		gk20a_mem_wr(g, ctxheader,
+		nvgpu_mem_wr(g, ctxheader,
 			ctxsw_prog_main_image_patch_adr_lo_o(),
 			virt_addr_lo);
-		gk20a_mem_wr(g, ctxheader,
+		nvgpu_mem_wr(g, ctxheader,
 			ctxsw_prog_main_image_patch_adr_hi_o(),
 			virt_addr_hi);
 	}
 
-	gk20a_mem_wr(g, mem, ctxsw_prog_main_image_zcull_o(),
+	nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_zcull_o(),
 		 ch_ctx->zcull_ctx.ctx_sw_mode);
 
 	if (ctxheader->gpu_va)
@@ -2153,7 +2153,7 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 		if (ch_ctx->pm_ctx.mem.gpu_va == 0) {
 			gk20a_err(dev_from_gk20a(g),
 				"context switched pm with no pm buffer!");
-			gk20a_mem_end(g, mem);
+			nvgpu_mem_end(g, mem);
 			return -EFAULT;
 		}
 
@@ -2161,11 +2161,11 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 	} else
 		virt_addr = 0;
 
-	data = gk20a_mem_rd(g, mem, ctxsw_prog_main_image_pm_o());
+	data = nvgpu_mem_rd(g, mem, ctxsw_prog_main_image_pm_o());
 	data = data & ~ctxsw_prog_main_image_pm_mode_m();
 	data |= ch_ctx->pm_ctx.pm_mode;
 
-	gk20a_mem_wr(g, mem, ctxsw_prog_main_image_pm_o(), data);
+	nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_pm_o(), data);
 
 	if (ctxheader->gpu_va)
 		g->ops.gr.write_pm_ptr(g, ctxheader, virt_addr);
@@ -2173,9 +2173,9 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 		g->ops.gr.write_pm_ptr(g, mem, virt_addr);
 
 
-	gk20a_mem_end(g, ctxheader);
+	nvgpu_mem_end(g, ctxheader);
 clean_up_mem:
-	gk20a_mem_end(g, mem);
+	nvgpu_mem_end(g, mem);
 
 	return ret;
 }
@@ -2256,11 +2256,11 @@ static int gr_gk20a_copy_ctxsw_ucode_segments(
 {
 	unsigned int i;
 
-	gk20a_mem_wr_n(g, dst, segments->boot.offset, bootimage,
+	nvgpu_mem_wr_n(g, dst, segments->boot.offset, bootimage,
 		segments->boot.size);
-	gk20a_mem_wr_n(g, dst, segments->code.offset, code,
+	nvgpu_mem_wr_n(g, dst, segments->code.offset, code,
 		segments->code.size);
-	gk20a_mem_wr_n(g, dst, segments->data.offset, data,
+	nvgpu_mem_wr_n(g, dst, segments->data.offset, data,
 		segments->data.size);
 
 	/* compute a "checksum" for the boot binary to detect its version */
@@ -2382,14 +2382,14 @@ void gr_gk20a_load_falcon_bind_instblk(struct gk20a *g)
 	inst_ptr = gk20a_mm_inst_block_addr(g, &ucode_info->inst_blk_desc);
 	gk20a_writel(g, gr_fecs_new_ctx_r(),
 		     gr_fecs_new_ctx_ptr_f(inst_ptr >> 12) |
-		     gk20a_aperture_mask(g, &ucode_info->inst_blk_desc,
+		     nvgpu_aperture_mask(g, &ucode_info->inst_blk_desc,
 		       gr_fecs_new_ctx_target_sys_mem_ncoh_f(),
 		       gr_fecs_new_ctx_target_vid_mem_f()) |
 		     gr_fecs_new_ctx_valid_m());
 
 	gk20a_writel(g, gr_fecs_arb_ctx_ptr_r(),
 		     gr_fecs_arb_ctx_ptr_ptr_f(inst_ptr >> 12) |
-		     gk20a_aperture_mask(g, &ucode_info->inst_blk_desc,
+		     nvgpu_aperture_mask(g, &ucode_info->inst_blk_desc,
 		       gr_fecs_arb_ctx_ptr_target_sys_mem_ncoh_f(),
 		       gr_fecs_arb_ctx_ptr_target_vid_mem_f()));
 
@@ -4748,7 +4748,7 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
 	addr >>= fb_mmu_debug_wr_addr_alignment_v();
 
 	gk20a_writel(g, fb_mmu_debug_wr_r(),
-		     gk20a_aperture_mask(g, &gr->mmu_wr_mem,
+		     nvgpu_aperture_mask(g, &gr->mmu_wr_mem,
 		       fb_mmu_debug_wr_aperture_sys_mem_ncoh_f(),
 		       fb_mmu_debug_wr_aperture_vid_mem_f()) |
 		     fb_mmu_debug_wr_vol_false_f() |
@@ -4758,7 +4758,7 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
 	addr >>= fb_mmu_debug_rd_addr_alignment_v();
 
 	gk20a_writel(g, fb_mmu_debug_rd_r(),
-		     gk20a_aperture_mask(g, &gr->mmu_rd_mem,
+		     nvgpu_aperture_mask(g, &gr->mmu_rd_mem,
 		       fb_mmu_debug_wr_aperture_sys_mem_ncoh_f(),
 		       fb_mmu_debug_rd_aperture_vid_mem_f()) |
 		     fb_mmu_debug_rd_vol_false_f() |
@@ -5092,13 +5092,13 @@ static int gr_gk20a_init_access_map(struct gk20a *g)
 	u32 *whitelist = NULL;
 	unsigned int num_entries = 0;
 
-	if (gk20a_mem_begin(g, mem)) {
+	if (nvgpu_mem_begin(g, mem)) {
 		gk20a_err(dev_from_gk20a(g),
 			  "failed to map priv access map memory");
 		return -ENOMEM;
 	}
 
-	gk20a_memset(g, mem, 0, 0, PAGE_SIZE * nr_pages);
+	nvgpu_memset(g, mem, 0, 0, PAGE_SIZE * nr_pages);
 
 	g->ops.gr.get_access_map(g, &whitelist, &num_entries);
 
@@ -5109,14 +5109,14 @@ static int gr_gk20a_init_access_map(struct gk20a *g)
 		map_shift = map_bit & 0x7; /* i.e. 0-7 */
 		gk20a_dbg_info("access map addr:0x%x byte:0x%x bit:%d",
 			       whitelist[w], map_byte, map_shift);
-		x = gk20a_mem_rd32(g, mem, map_byte / sizeof(u32));
+		x = nvgpu_mem_rd32(g, mem, map_byte / sizeof(u32));
 		x |= 1 << (
 			   (map_byte % sizeof(u32) * BITS_PER_BYTE)
 			    + map_shift);
-		gk20a_mem_wr32(g, mem, map_byte / sizeof(u32), x);
+		nvgpu_mem_wr32(g, mem, map_byte / sizeof(u32), x);
 	}
 
-	gk20a_mem_end(g, mem);
+	nvgpu_mem_end(g, mem);
 	return 0;
 }
 
@@ -7160,7 +7160,7 @@ static int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
 			/* reset the patch count from previous
 			   runs,if ucode has already processed
 			   it */
-			tmp = gk20a_mem_rd(g, mem,
+			tmp = nvgpu_mem_rd(g, mem,
 			      ctxsw_prog_main_image_patch_count_o());
 
 			if (!tmp)
@@ -7172,13 +7172,13 @@ static int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
 		vaddr_lo = u64_lo32(ch_ctx->patch_ctx.mem.gpu_va);
 		vaddr_hi = u64_hi32(ch_ctx->patch_ctx.mem.gpu_va);
 
-		gk20a_mem_wr(g, mem,
+		nvgpu_mem_wr(g, mem,
 			 ctxsw_prog_main_image_patch_count_o(),
 			 ch_ctx->patch_ctx.data_count);
-		gk20a_mem_wr(g, mem,
+		nvgpu_mem_wr(g, mem,
 			 ctxsw_prog_main_image_patch_adr_lo_o(),
 			 vaddr_lo);
-		gk20a_mem_wr(g, mem,
+		nvgpu_mem_wr(g, mem,
 			 ctxsw_prog_main_image_patch_adr_hi_o(),
 			 vaddr_hi);
 
@@ -8393,7 +8393,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 		 * gr_gk20a_apply_instmem_overrides,
 		 * recoded in-place instead.
 		 */
-		if (gk20a_mem_begin(g, &ch_ctx->gr_ctx->mem)) {
+		if (nvgpu_mem_begin(g, &ch_ctx->gr_ctx->mem)) {
 			err = -ENOMEM;
 			goto cleanup;
 		}
@@ -8422,7 +8422,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 				err = -EINVAL;
 				goto cleanup;
 			}
-			if (gk20a_mem_begin(g, &ch_ctx->pm_ctx.mem)) {
+			if (nvgpu_mem_begin(g, &ch_ctx->pm_ctx.mem)) {
 				err = -ENOMEM;
 				goto cleanup;
 			}
@@ -8445,20 +8445,20 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 			    (offsets[j] >= g->gr.ctx_vars.golden_image_size))
 				continue;
 			if (pass == 0) { /* write pass */
-				v = gk20a_mem_rd(g, current_mem, offsets[j]);
+				v = nvgpu_mem_rd(g, current_mem, offsets[j]);
 				v &= ~ctx_ops[i].and_n_mask_lo;
 				v |= ctx_ops[i].value_lo;
-				gk20a_mem_wr(g, current_mem, offsets[j], v);
+				nvgpu_mem_wr(g, current_mem, offsets[j], v);
 
 				gk20a_dbg(gpu_dbg_gpu_dbg,
 					  "context wr: offset=0x%x v=0x%x",
 					  offsets[j], v);
 
 				if (ctx_ops[i].op == REGOP(WRITE_64)) {
-					v = gk20a_mem_rd(g, current_mem, offsets[j] + 4);
+					v = nvgpu_mem_rd(g, current_mem, offsets[j] + 4);
 					v &= ~ctx_ops[i].and_n_mask_hi;
 					v |= ctx_ops[i].value_hi;
-					gk20a_mem_wr(g, current_mem, offsets[j] + 4, v);
+					nvgpu_mem_wr(g, current_mem, offsets[j] + 4, v);
 
 					gk20a_dbg(gpu_dbg_gpu_dbg,
 						  "context wr: offset=0x%x v=0x%x",
@@ -8472,14 +8472,14 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 
 			} else { /* read pass */
 				ctx_ops[i].value_lo =
-					gk20a_mem_rd(g, current_mem, offsets[0]);
+					nvgpu_mem_rd(g, current_mem, offsets[0]);
 
 				gk20a_dbg(gpu_dbg_gpu_dbg, "context rd: offset=0x%x v=0x%x",
 					  offsets[0], ctx_ops[i].value_lo);
 
 				if (ctx_ops[i].op == REGOP(READ_64)) {
 					ctx_ops[i].value_hi =
-						gk20a_mem_rd(g, current_mem, offsets[0] + 4);
+						nvgpu_mem_rd(g, current_mem, offsets[0] + 4);
 
 					gk20a_dbg(gpu_dbg_gpu_dbg,
 						  "context rd: offset=0x%x v=0x%x",
@@ -8507,9 +8507,9 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 	if (ch_ctx->patch_ctx.mem.cpu_va)
 		gr_gk20a_ctx_patch_write_end(g, ch_ctx);
 	if (gr_ctx_ready)
-		gk20a_mem_end(g, &ch_ctx->gr_ctx->mem);
+		nvgpu_mem_end(g, &ch_ctx->gr_ctx->mem);
 	if (pm_ctx_ready)
-		gk20a_mem_end(g, &ch_ctx->pm_ctx.mem);
+		nvgpu_mem_end(g, &ch_ctx->pm_ctx.mem);
 
 	if (restart_gr_ctxsw) {
 		int tmp_err = gr_gk20a_enable_ctxsw(g);