From b69020bff5dfa69cad926c9374cdbe9a62509ffd Mon Sep 17 00:00:00 2001
From: Alex Waterman
Date: Wed, 15 Mar 2017 16:42:12 -0700
Subject: gpu: nvgpu: Rename gk20a_mem_* functions

Rename the functions used for mem_desc access to nvgpu_mem_*.

JIRA NVGPU-12

Change-Id: Ibfdc1112d43f0a125e4487c250e3f977ffd2cd75
Signed-off-by: Alex Waterman
Reviewed-on: http://git-master/r/1323325
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/gp10b/fifo_gp10b.c | 40 ++++++++++++++++----------------
 drivers/gpu/nvgpu/gp10b/gr_gp10b.c   | 44 ++++++++++++++++++------------------
 drivers/gpu/nvgpu/gp10b/mm_gp10b.c   | 22 +++++++++---------
 3 files changed, 53 insertions(+), 53 deletions(-)

(limited to 'drivers/gpu/nvgpu/gp10b')

diff --git a/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c b/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
index 6f1a0298..3787662b 100644
--- a/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
@@ -33,18 +33,18 @@ static void gp10b_set_pdb_fault_replay_flags(struct gk20a *g,
 
 	gk20a_dbg_fn("");
 
-	val = gk20a_mem_rd32(g, mem,
+	val = nvgpu_mem_rd32(g, mem,
 			ram_in_page_dir_base_fault_replay_tex_w());
 	val &= ~ram_in_page_dir_base_fault_replay_tex_m();
 	val |= ram_in_page_dir_base_fault_replay_tex_true_f();
-	gk20a_mem_wr32(g, mem,
+	nvgpu_mem_wr32(g, mem,
 			ram_in_page_dir_base_fault_replay_tex_w(), val);
 
-	val = gk20a_mem_rd32(g, mem,
+	val = nvgpu_mem_rd32(g, mem,
 			ram_in_page_dir_base_fault_replay_gcc_w());
 	val &= ~ram_in_page_dir_base_fault_replay_gcc_m();
 	val |= ram_in_page_dir_base_fault_replay_gcc_true_f();
-	gk20a_mem_wr32(g, mem,
+	nvgpu_mem_wr32(g, mem,
 			ram_in_page_dir_base_fault_replay_gcc_w(), val);
 
 	gk20a_dbg_fn("done");
@@ -64,14 +64,14 @@ int channel_gp10b_commit_userd(struct channel_gk20a *c)
 	gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx",
 		c->hw_chid, (u64)c->userd_iova);
 
-	gk20a_mem_wr32(g, &c->inst_block,
+	nvgpu_mem_wr32(g, &c->inst_block,
 		ram_in_ramfc_w() + ram_fc_userd_w(),
 		(g->mm.vidmem_is_vidmem ?
		 pbdma_userd_target_sys_mem_ncoh_f() :
		 pbdma_userd_target_vid_mem_f()) |
		pbdma_userd_addr_f(addr_lo));
 
-	gk20a_mem_wr32(g, &c->inst_block,
+	nvgpu_mem_wr32(g, &c->inst_block,
 		ram_in_ramfc_w() + ram_fc_userd_hi_w(),
 		pbdma_userd_hi_addr_f(addr_hi));
 
@@ -87,25 +87,25 @@ static int channel_gp10b_setup_ramfc(struct channel_gk20a *c,
 
 	gk20a_dbg_fn("");
 
-	gk20a_memset(g, mem, 0, 0, ram_fc_size_val_v());
+	nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v());
 
-	gk20a_mem_wr32(g, mem, ram_fc_gp_base_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_gp_base_w(),
 		pbdma_gp_base_offset_f(
 		u64_lo32(gpfifo_base >> pbdma_gp_base_rsvd_s())));
 
-	gk20a_mem_wr32(g, mem, ram_fc_gp_base_hi_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_gp_base_hi_w(),
 		pbdma_gp_base_hi_offset_f(u64_hi32(gpfifo_base)) |
 		pbdma_gp_base_hi_limit2_f(ilog2(gpfifo_entries)));
 
-	gk20a_mem_wr32(g, mem, ram_fc_signature_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_signature_w(),
 		c->g->ops.fifo.get_pbdma_signature(c->g));
 
-	gk20a_mem_wr32(g, mem, ram_fc_formats_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_formats_w(),
 		pbdma_formats_gp_fermi0_f() |
 		pbdma_formats_pb_fermi1_f() |
 		pbdma_formats_mp_fermi0_f());
 
-	gk20a_mem_wr32(g, mem, ram_fc_pb_header_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_pb_header_w(),
 		pbdma_pb_header_priv_user_f() |
 		pbdma_pb_header_method_zero_f() |
 		pbdma_pb_header_subchannel_zero_f() |
@@ -113,17 +113,17 @@ static int channel_gp10b_setup_ramfc(struct channel_gk20a *c,
 		pbdma_pb_header_first_true_f() |
 		pbdma_pb_header_type_inc_f());
 
-	gk20a_mem_wr32(g, mem, ram_fc_subdevice_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_subdevice_w(),
 		pbdma_subdevice_id_f(1) |
 		pbdma_subdevice_status_active_f() |
 		pbdma_subdevice_channel_dma_enable_f());
 
-	gk20a_mem_wr32(g, mem, ram_fc_target_w(), pbdma_target_engine_sw_f());
+	nvgpu_mem_wr32(g, mem, ram_fc_target_w(), pbdma_target_engine_sw_f());
 
-	gk20a_mem_wr32(g, mem, ram_fc_acquire_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_acquire_w(),
 		g->ops.fifo.pbdma_acquire_val(acquire_timeout));
 
-	gk20a_mem_wr32(g, mem, ram_fc_runlist_timeslice_w(),
+	nvgpu_mem_wr32(g, mem, ram_fc_runlist_timeslice_w(),
 		pbdma_runlist_timeslice_timeout_128_f() |
 		pbdma_runlist_timeslice_timescale_3_f() |
 		pbdma_runlist_timeslice_enable_true_f());
@@ -132,11 +132,11 @@ static int channel_gp10b_setup_ramfc(struct channel_gk20a *c,
 
 	gp10b_set_pdb_fault_replay_flags(c->g, mem);
 
-	gk20a_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));
+	nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));
 
 	if (c->is_privileged_channel) {
 		/* Set privilege level for channel */
-		gk20a_mem_wr32(g, mem, ram_fc_config_w(),
+		nvgpu_mem_wr32(g, mem, ram_fc_config_w(),
 			pbdma_config_auth_level_privileged_f());
 
 		gk20a_fifo_setup_ramfc_for_privileged_channel(c);
@@ -158,7 +158,7 @@ static int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c)
 
 	gk20a_dbg_fn("");
 
-	v = gk20a_mem_rd32(c->g, &c->inst_block,
+	v = nvgpu_mem_rd32(c->g, &c->inst_block,
 			ram_fc_allowed_syncpoints_w());
 	old_syncpt = pbdma_allowed_syncpoints_0_index_v(v);
 	if (c->sync)
@@ -178,7 +178,7 @@ static int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c)
 
 		v |= pbdma_allowed_syncpoints_0_index_f(new_syncpt);
 
-		gk20a_mem_wr32(c->g, &c->inst_block,
+		nvgpu_mem_wr32(c->g, &c->inst_block,
 			ram_fc_allowed_syncpoints_w(), v);
 	}
 
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
index 95590e40..fc831e75 100644
--- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
@@ -1039,51 +1039,51 @@ static void dump_ctx_switch_stats(struct gk20a *g, struct vm_gk20a *vm,
 {
 	struct mem_desc *mem = &gr_ctx->mem;
 
-	if (gk20a_mem_begin(g, mem)) {
+	if (nvgpu_mem_begin(g, mem)) {
 		WARN_ON("Cannot map context");
 		return;
 	}
 	gk20a_err(dev_from_gk20a(g),
 		"ctxsw_prog_main_image_magic_value_o : %x (expect %x)\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_magic_value_o()),
 		ctxsw_prog_main_image_magic_value_v_value_v());
 	gk20a_err(dev_from_gk20a(g),
 		"ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi : %x\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_o()));
 	gk20a_err(dev_from_gk20a(g),
 		"ctxsw_prog_main_image_context_timestamp_buffer_ptr : %x\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_context_timestamp_buffer_ptr_o()));
 	gk20a_err(dev_from_gk20a(g),
 		"ctxsw_prog_main_image_context_timestamp_buffer_control : %x\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_context_timestamp_buffer_control_o()));
 	gk20a_err(dev_from_gk20a(g), "NUM_SAVE_OPERATIONS : %d\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_num_save_ops_o()));
 	gk20a_err(dev_from_gk20a(g), "WFI_SAVE_OPERATIONS : %d\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_num_wfi_save_ops_o()));
 	gk20a_err(dev_from_gk20a(g), "CTA_SAVE_OPERATIONS : %d\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_num_cta_save_ops_o()));
 	gk20a_err(dev_from_gk20a(g), "GFXP_SAVE_OPERATIONS : %d\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_num_gfxp_save_ops_o()));
 	gk20a_err(dev_from_gk20a(g), "CILP_SAVE_OPERATIONS : %d\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_num_cilp_save_ops_o()));
 	gk20a_err(dev_from_gk20a(g),
 		"image gfx preemption option (GFXP is 1) %x\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_graphics_preemption_options_o()));
 	gk20a_err(dev_from_gk20a(g),
 		"image compute preemption option (CTA is 1) %x\n",
-		gk20a_mem_rd(g, mem,
+		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_compute_preemption_options_o()));
-	gk20a_mem_end(g, mem);
+	nvgpu_mem_end(g, mem);
 }
 
 static void gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
@@ -1123,21 +1123,21 @@ static void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
 
 	if (gr_ctx->graphics_preempt_mode == NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP) {
 		gk20a_dbg_info("GfxP: %x", gfxp_preempt_option);
-		gk20a_mem_wr(g, mem,
+		nvgpu_mem_wr(g, mem,
 			ctxsw_prog_main_image_graphics_preemption_options_o(),
 			gfxp_preempt_option);
 	}
 
 	if (gr_ctx->compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CILP) {
 		gk20a_dbg_info("CILP: %x", cilp_preempt_option);
-		gk20a_mem_wr(g, mem,
+		nvgpu_mem_wr(g, mem,
 			ctxsw_prog_main_image_compute_preemption_options_o(),
 			cilp_preempt_option);
 	}
 
 	if (gr_ctx->compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CTA) {
 		gk20a_dbg_info("CTA: %x", cta_preempt_option);
-		gk20a_mem_wr(g, mem,
+		nvgpu_mem_wr(g, mem,
 			ctxsw_prog_main_image_compute_preemption_options_o(),
 			cta_preempt_option);
 	}
@@ -1147,7 +1147,7 @@ static void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
 		u32 size;
 		u32 cbes_reserve;
 
-		gk20a_mem_wr(g, mem,
+		nvgpu_mem_wr(g, mem,
 			ctxsw_prog_main_image_full_preemption_ptr_o(),
 			gr_ctx->t18x.preempt_ctxsw_buffer.gpu_va >> 8);
 
@@ -2077,7 +2077,7 @@ static int gr_gp10b_set_boosted_ctx(struct channel_gk20a *ch,
 
 	gr_ctx->boosted_ctx = boost;
 
-	if (gk20a_mem_begin(g, mem))
+	if (nvgpu_mem_begin(g, mem))
 		return -ENOMEM;
 
 	err = gk20a_disable_channel_tsg(g, ch);
@@ -2096,7 +2096,7 @@ static int gr_gp10b_set_boosted_ctx(struct channel_gk20a *ch,
 enable_ch:
 	gk20a_enable_channel_tsg(g, ch);
 unmap_ctx:
-	gk20a_mem_end(g, mem);
+	nvgpu_mem_end(g, mem);
 
 	return err;
 }
@@ -2107,7 +2107,7 @@ static void gr_gp10b_update_boosted_ctx(struct gk20a *g, struct mem_desc *mem,
 
 	v = ctxsw_prog_main_image_pmu_options_boost_clock_frequencies_f(
 		gr_ctx->boosted_ctx);
-	gk20a_mem_wr(g, mem, ctxsw_prog_main_image_pmu_options_o(), v);
+	nvgpu_mem_wr(g, mem, ctxsw_prog_main_image_pmu_options_o(), v);
 }
 
 static int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
@@ -2164,7 +2164,7 @@ static int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
 		}
 	}
 
-	if (gk20a_mem_begin(g, mem))
+	if (nvgpu_mem_begin(g, mem))
 		return -ENOMEM;
 
 	err = gk20a_disable_channel_tsg(g, ch);
@@ -2191,7 +2191,7 @@ static int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
 enable_ch:
 	gk20a_enable_channel_tsg(g, ch);
 unmap_ctx:
-	gk20a_mem_end(g, mem);
+	nvgpu_mem_end(g, mem);
 
 	return err;
 }
diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
index a5322bad..8c6340f0 100644
--- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
@@ -107,7 +107,7 @@ static int gb10b_init_bar2_mm_hw_setup(struct gk20a *g)
 
 	gk20a_dbg_info("bar2 inst block ptr: 0x%08x", (u32)inst_pa);
 	gk20a_writel(g, bus_bar2_block_r(),
-		     gk20a_aperture_mask(g, inst_block,
+		     nvgpu_aperture_mask(g, inst_block,
 			bus_bar2_block_target_sys_mem_ncoh_f(),
 			bus_bar2_block_target_vid_mem_f()) |
 		     bus_bar2_block_mode_virtual_f() |
@@ -162,7 +162,7 @@ static int update_gmmu_pde3_locked(struct vm_gk20a *vm,
 		   u32 kind_v, u64 *ctag,
 		   bool cacheable, bool unmapped_pte,
 		   int rw_flag, bool sparse, bool priv,
-		   enum gk20a_aperture aperture)
+		   enum nvgpu_aperture aperture)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	u64 pte_addr = 0;
@@ -174,7 +174,7 @@ static int update_gmmu_pde3_locked(struct vm_gk20a *vm,
 	pte_addr = gk20a_pde_addr(g, pte) >>
 		gmmu_new_pde_address_shift_v();
 
-	pde_v[0] |= gk20a_aperture_mask(g, &pte->mem,
+	pde_v[0] |= nvgpu_aperture_mask(g, &pte->mem,
 		gmmu_new_pde_aperture_sys_mem_ncoh_f(),
 		gmmu_new_pde_aperture_video_memory_f());
 	pde_v[0] |= gmmu_new_pde_address_sys_f(u64_lo32(pte_addr));
@@ -205,7 +205,7 @@ static int update_gmmu_pde0_locked(struct vm_gk20a *vm,
 		   u32 kind_v, u64 *ctag,
 		   bool cacheable, bool unmapped_pte,
 		   int rw_flag, bool sparse, bool priv,
-		   enum gk20a_aperture aperture)
+		   enum nvgpu_aperture aperture)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	bool small_valid, big_valid;
@@ -230,7 +230,7 @@ static int update_gmmu_pde0_locked(struct vm_gk20a *vm,
 
 	if (small_valid) {
 		pde_v[2] |= gmmu_new_dual_pde_address_small_sys_f(pte_addr_small);
-		pde_v[2] |= gk20a_aperture_mask(g, &entry->mem,
+		pde_v[2] |= nvgpu_aperture_mask(g, &entry->mem,
 			gmmu_new_dual_pde_aperture_small_sys_mem_ncoh_f(),
 			gmmu_new_dual_pde_aperture_small_video_memory_f());
 		pde_v[2] |= gmmu_new_dual_pde_vol_small_true_f();
@@ -240,7 +240,7 @@ static int update_gmmu_pde0_locked(struct vm_gk20a *vm,
 	if (big_valid) {
 		pde_v[0] |= gmmu_new_dual_pde_address_big_sys_f(pte_addr_big);
 		pde_v[0] |= gmmu_new_dual_pde_vol_big_true_f();
-		pde_v[0] |= gk20a_aperture_mask(g, &entry->mem,
+		pde_v[0] |= nvgpu_aperture_mask(g, &entry->mem,
 			gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f(),
 			gmmu_new_dual_pde_aperture_big_video_memory_f());
 		pde_v[1] |= pte_addr_big >> 28;
@@ -268,7 +268,7 @@ static int update_gmmu_pte_locked(struct vm_gk20a *vm,
 		   u32 kind_v, u64 *ctag,
 		   bool cacheable, bool unmapped_pte,
 		   int rw_flag, bool sparse, bool priv,
-		   enum gk20a_aperture aperture)
+		   enum nvgpu_aperture aperture)
 {
 	struct gk20a *g = vm->mm->g;
 	u32 page_size = vm->gmmu_page_sizes[gmmu_pgsz_idx];
@@ -284,7 +284,7 @@ static int update_gmmu_pte_locked(struct vm_gk20a *vm,
 		u32 pte_addr = aperture == APERTURE_SYSMEM ?
 				gmmu_new_pte_address_sys_f(iova_v) :
 				gmmu_new_pte_address_vid_f(iova_v);
-		u32 pte_tgt = __gk20a_aperture_mask(g, aperture,
+		u32 pte_tgt = __nvgpu_aperture_mask(g, aperture,
 				gmmu_new_pte_aperture_sys_mem_ncoh_f(),
 				gmmu_new_pte_aperture_video_memory_f());
 
@@ -384,15 +384,15 @@ static void gp10b_mm_init_pdb(struct gk20a *g, struct mem_desc *inst_block,
 
 	gk20a_dbg_info("pde pa=0x%llx", pdb_addr);
 
-	gk20a_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(),
-		gk20a_aperture_mask(g, &vm->pdb.mem,
+	nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(),
+		nvgpu_aperture_mask(g, &vm->pdb.mem,
 			ram_in_page_dir_base_target_sys_mem_ncoh_f(),
 			ram_in_page_dir_base_target_vid_mem_f()) |
 		ram_in_page_dir_base_vol_true_f() |
 		ram_in_page_dir_base_lo_f(pdb_addr_lo) |
 		1 << 10);
 
-	gk20a_mem_wr32(g, inst_block, ram_in_page_dir_base_hi_w(),
+	nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_hi_w(),
 		ram_in_page_dir_base_hi_f(pdb_addr_hi));
 }
--
cgit v1.2.2
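
For reference, a typical call site under the new naming. This is a minimal
hypothetical sketch (example_set_flag, word, and flag are illustrative names,
not part of the patch), assuming only the signatures visible in the hunks
above: nvgpu_mem_begin()/nvgpu_mem_end() to map and unmap a mem_desc for CPU
access, and nvgpu_mem_rd32()/nvgpu_mem_wr32() for 32-bit word access:

	/* Hypothetical illustration only; not part of the commit above. */
	static int example_set_flag(struct gk20a *g, struct mem_desc *mem,
				    u32 word, u32 flag)
	{
		u32 val;

		/* Map the buffer for CPU access; nonzero means failure,
		 * mirroring the -ENOMEM pattern in the hunks above. */
		if (nvgpu_mem_begin(g, mem))
			return -ENOMEM;

		/* Read-modify-write one 32-bit word, as
		 * gp10b_set_pdb_fault_replay_flags() does above. */
		val = nvgpu_mem_rd32(g, mem, word);
		val |= flag;
		nvgpu_mem_wr32(g, mem, word, val);

		nvgpu_mem_end(g, mem);
		return 0;
	}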