From 18a017865946617fd63256858a0d2300160643f4 Mon Sep 17 00:00:00 2001
From: Konsta Holtta
Date: Thu, 12 May 2016 09:31:30 +0300
Subject: gpu: nvgpu: refactor gk20a_mem_{wr,rd} for vidmem

To support vidmem, pass g and mem_desc to the buffer memory accessor
functions. This allows the functions to select the memory access method
based on the buffer aperture instead of using the cpu pointer directly,
as was done until now. The selection and aperture support will be in
another patch; this patch only refactors these accessors, but keeps the
underlying functionality as-is.

JIRA DNVGPU-23

Change-Id: I21d4a54827b0e2741012dfde7952c0555a583435
Signed-off-by: Konsta Holtta
Reviewed-on: http://git-master/r/1121914
GVS: Gerrit_Virtual_Submit
Reviewed-by: Ken Adams
---
 drivers/gpu/nvgpu/gp10b/fifo_gp10b.c | 61 ++++++++++++++++--------------------
 1 file changed, 27 insertions(+), 34 deletions(-)

diff --git a/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c b/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
index 9cb26d3f..4766e0e4 100644
--- a/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c
@@ -25,24 +25,24 @@
 #include "hw_ram_gp10b.h"
 
 static void gp10b_set_pdb_fault_replay_flags(struct gk20a *g,
-		void *inst_ptr)
+		struct mem_desc *mem)
 {
 	u32 val;
 
 	gk20a_dbg_fn("");
 
-	val = gk20a_mem_rd32(inst_ptr,
+	val = gk20a_mem_rd32(g, mem,
 			ram_in_page_dir_base_fault_replay_tex_w());
 	val &= ~ram_in_page_dir_base_fault_replay_tex_m();
 	val |= ram_in_page_dir_base_fault_replay_tex_true_f();
-	gk20a_mem_wr32(inst_ptr,
+	gk20a_mem_wr32(g, mem,
 			ram_in_page_dir_base_fault_replay_tex_w(), val);
 
-	val = gk20a_mem_rd32(inst_ptr,
+	val = gk20a_mem_rd32(g, mem,
 			ram_in_page_dir_base_fault_replay_gcc_w());
 	val &= ~ram_in_page_dir_base_fault_replay_gcc_m();
 	val |= ram_in_page_dir_base_fault_replay_gcc_true_f();
-	gk20a_mem_wr32(inst_ptr,
+	gk20a_mem_wr32(g, mem,
 			ram_in_page_dir_base_fault_replay_gcc_w(), val);
 
 	gk20a_dbg_fn("done");
@@ -52,28 +52,25 @@ static int channel_gp10b_commit_userd(struct channel_gk20a *c)
 {
 	u32 addr_lo;
 	u32 addr_hi;
-	void *inst_ptr;
 	struct gk20a *g = c->g;
 
 	gk20a_dbg_fn("");
 
-	inst_ptr = c->inst_block.cpu_va;
-	if (!inst_ptr)
-		return -ENOMEM;
-
 	addr_lo = u64_lo32(c->userd_iova >> ram_userd_base_shift_v());
 	addr_hi = u64_hi32(c->userd_iova);
 
 	gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx",
 		c->hw_chid, (u64)c->userd_iova);
 
-	gk20a_mem_wr32(inst_ptr, ram_in_ramfc_w() + ram_fc_userd_w(),
+	gk20a_mem_wr32(g, &c->inst_block,
+		       ram_in_ramfc_w() + ram_fc_userd_w(),
 		 (g->mm.vidmem_is_vidmem ?
 		  pbdma_userd_target_sys_mem_ncoh_f() :
 		  pbdma_userd_target_vid_mem_f()) |
 		 pbdma_userd_addr_f(addr_lo));
 
-	gk20a_mem_wr32(inst_ptr, ram_in_ramfc_w() + ram_fc_userd_hi_w(),
+	gk20a_mem_wr32(g, &c->inst_block,
+		       ram_in_ramfc_w() + ram_fc_userd_hi_w(),
 		       pbdma_userd_hi_addr_f(addr_hi));
 
 	return 0;
@@ -82,33 +79,30 @@ static int channel_gp10b_commit_userd(struct channel_gk20a *c)
 static int channel_gp10b_setup_ramfc(struct channel_gk20a *c,
 			u64 gpfifo_base, u32 gpfifo_entries, u32 flags)
 {
-	void *inst_ptr;
+	struct gk20a *g = c->g;
+	struct mem_desc *mem = &c->inst_block;
 
 	gk20a_dbg_fn("");
 
-	inst_ptr = c->inst_block.cpu_va;
-	if (!inst_ptr)
-		return -ENOMEM;
+	gk20a_memset(g, mem, 0, 0, ram_fc_size_val_v());
 
-	memset(inst_ptr, 0, ram_fc_size_val_v());
-
-	gk20a_mem_wr32(inst_ptr, ram_fc_gp_base_w(),
+	gk20a_mem_wr32(g, mem, ram_fc_gp_base_w(),
 		pbdma_gp_base_offset_f(
 		u64_lo32(gpfifo_base >> pbdma_gp_base_rsvd_s())));
 
-	gk20a_mem_wr32(inst_ptr, ram_fc_gp_base_hi_w(),
+	gk20a_mem_wr32(g, mem, ram_fc_gp_base_hi_w(),
 		pbdma_gp_base_hi_offset_f(u64_hi32(gpfifo_base)) |
 		pbdma_gp_base_hi_limit2_f(ilog2(gpfifo_entries)));
 
-	gk20a_mem_wr32(inst_ptr, ram_fc_signature_w(),
+	gk20a_mem_wr32(g, mem, ram_fc_signature_w(),
 		c->g->ops.fifo.get_pbdma_signature(c->g));
 
-	gk20a_mem_wr32(inst_ptr, ram_fc_formats_w(),
+	gk20a_mem_wr32(g, mem, ram_fc_formats_w(),
 		pbdma_formats_gp_fermi0_f() |
 		pbdma_formats_pb_fermi1_f() |
 		pbdma_formats_mp_fermi0_f());
 
-	gk20a_mem_wr32(inst_ptr, ram_fc_pb_header_w(),
+	gk20a_mem_wr32(g, mem, ram_fc_pb_header_w(),
 		pbdma_pb_header_priv_user_f() |
 		pbdma_pb_header_method_zero_f() |
 		pbdma_pb_header_subchannel_zero_f() |
@@ -116,26 +110,26 @@ static int channel_gp10b_setup_ramfc(struct channel_gk20a *c,
 		pbdma_pb_header_first_true_f() |
 		pbdma_pb_header_type_inc_f());
 
-	gk20a_mem_wr32(inst_ptr, ram_fc_subdevice_w(),
+	gk20a_mem_wr32(g, mem, ram_fc_subdevice_w(),
 		pbdma_subdevice_id_f(1) |
 		pbdma_subdevice_status_active_f() |
 		pbdma_subdevice_channel_dma_enable_f());
 
-	gk20a_mem_wr32(inst_ptr, ram_fc_target_w(), pbdma_target_engine_sw_f());
+	gk20a_mem_wr32(g, mem, ram_fc_target_w(), pbdma_target_engine_sw_f());
 
-	gk20a_mem_wr32(inst_ptr, ram_fc_acquire_w(),
+	gk20a_mem_wr32(g, mem, ram_fc_acquire_w(),
 		channel_gk20a_pbdma_acquire_val(c));
 
-	gk20a_mem_wr32(inst_ptr, ram_fc_runlist_timeslice_w(),
+	gk20a_mem_wr32(g, mem, ram_fc_runlist_timeslice_w(),
 		pbdma_runlist_timeslice_timeout_128_f() |
 		pbdma_runlist_timeslice_timescale_3_f() |
 		pbdma_runlist_timeslice_enable_true_f());
 
 	if ( flags & NVGPU_ALLOC_GPFIFO_FLAGS_REPLAYABLE_FAULTS_ENABLE)
-		gp10b_set_pdb_fault_replay_flags(c->g, inst_ptr);
+		gp10b_set_pdb_fault_replay_flags(c->g, mem);
 
-	gk20a_mem_wr32(inst_ptr, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));
+	gk20a_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));
 
 	return channel_gp10b_commit_userd(c);
 }
 
@@ -149,14 +143,12 @@ static u32 gp10b_fifo_get_pbdma_signature(struct gk20a *g)
 static int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c)
 {
 	u32 new_syncpt = 0, old_syncpt;
-	void *inst_ptr;
 	u32 v;
 
 	gk20a_dbg_fn("");
 
-	inst_ptr = c->inst_block.cpu_va;
-
-	v = gk20a_mem_rd32(inst_ptr, ram_fc_allowed_syncpoints_w());
+	v = gk20a_mem_rd32(c->g, &c->inst_block,
+			ram_fc_allowed_syncpoints_w());
 	old_syncpt = pbdma_allowed_syncpoints_0_index_v(v);
 	if (c->sync)
 		new_syncpt = c->sync->syncpt_id(c->sync);
@@ -175,7 +167,8 @@ static int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c)
 
 		v |= pbdma_allowed_syncpoints_0_index_f(new_syncpt);
 
-		gk20a_mem_wr32(inst_ptr, ram_fc_allowed_syncpoints_w(), v);
+		gk20a_mem_wr32(c->g, &c->inst_block,
+				ram_fc_allowed_syncpoints_w(), v);
 	}
 
 	/* enable channel */
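
The patch only touches call sites, so for readers who want to see the shape of
the new interface, a sketch follows. It is illustrative only, not the actual
nvgpu implementation: the gk20a_mem_rd32()/gk20a_mem_wr32() names and the
(g, mem, word offset) parameter order come from the call sites above, while the
struct layout (beyond the cpu_va field the old code used) and the helper bodies
are assumptions that merely mimic the pre-vidmem behaviour the commit message
says is kept as-is.

/* Illustrative sketch only -- not the real nvgpu accessors. */
#include <linux/types.h>

struct gk20a;			/* device handle, now threaded through */

struct mem_desc {
	void *cpu_va;		/* CPU mapping, the only access path so far */
	/* aperture / vidmem bookkeeping would be added by a later patch */
};

/* Read one 32-bit word at word offset w of the buffer. */
static inline u32 gk20a_mem_rd32(struct gk20a *g, struct mem_desc *mem, u32 w)
{
	u32 *ptr = mem->cpu_va;

	/* A later patch can branch on the buffer aperture here. */
	return ptr[w];
}

/* Write one 32-bit word at word offset w of the buffer. */
static inline void gk20a_mem_wr32(struct gk20a *g, struct mem_desc *mem,
				  u32 w, u32 data)
{
	u32 *ptr = mem->cpu_va;

	ptr[w] = data;
}

Because every call site now passes both g and the mem_desc, an aperture-aware
version of these two helpers can later choose between the CPU mapping and a
vidmem access path without touching the FIFO code again.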