author     Deepak Nibade <dnibade@nvidia.com>                   2017-01-24 08:30:42 -0500
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2017-02-22 07:15:02 -0500
commit     8ee3aa4b3175d8d27e57a0f5d5e2cdf3d78a4a58 (patch)
tree       505dfd2ea2aca2f1cbdb254baee980862d21e04d /drivers/gpu/nvgpu/gk20a/gr_gk20a.c
parent     1f855af63fdd31fe3dcfee75f4f5f9b62f30d87e (diff)
gpu: nvgpu: use common nvgpu mutex/spinlock APIs
Instead of using the Linux mutex and spinlock APIs directly, use the new APIs defined in <nvgpu/lock.h>.

Replace the Linux-specific mutex/spinlock declaration, init, lock and unlock APIs with the new APIs, e.g. struct mutex is replaced by struct nvgpu_mutex and mutex_lock() is replaced by nvgpu_mutex_acquire().

Also include <nvgpu/lock.h> instead of <linux/mutex.h> and <linux/spinlock.h>.

Add explicit <nvgpu/lock.h> includes to the files below to fix compilation failures:
gk20a/platform_gk20a.h
include/nvgpu/allocator.h

Jira NVGPU-13

Change-Id: I81a05d21ecdbd90c2076a9f0aefd0e40b215bd33
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1293187
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
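For reference, the conversion pattern this change applies looks roughly like the sketch below. The struct and function names (foo_state, foo_init, foo_do_work) are hypothetical and only illustrate the pattern; the <nvgpu/lock.h> types and calls are the ones that appear in this patch.

/* Minimal sketch of the lock API conversion; foo_* names are illustrative only. */
#include <nvgpu/lock.h>         /* instead of <linux/mutex.h> and <linux/spinlock.h> */

struct foo_state {
        struct nvgpu_mutex lock;                /* was: struct mutex lock; */
        struct nvgpu_spinlock fast_lock;        /* was: spinlock_t fast_lock; */
};

static void foo_init(struct foo_state *s)
{
        nvgpu_mutex_init(&s->lock);             /* was: mutex_init(&s->lock); */
        nvgpu_spinlock_init(&s->fast_lock);     /* was: spin_lock_init(&s->fast_lock); */
}

static void foo_do_work(struct foo_state *s)
{
        nvgpu_mutex_acquire(&s->lock);          /* was: mutex_lock(&s->lock); */
        /* ... sleepable critical section ... */
        nvgpu_mutex_release(&s->lock);          /* was: mutex_unlock(&s->lock); */

        nvgpu_spinlock_acquire(&s->fast_lock);  /* was: spin_lock(&s->fast_lock); */
        /* ... non-sleeping critical section ... */
        nvgpu_spinlock_release(&s->fast_lock);  /* was: spin_unlock(&s->fast_lock); */
}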
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gr_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c  64
1 file changed, 32 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index d3b91a50..aad6c07b 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -538,7 +538,7 @@ int gr_gk20a_submit_fecs_method_op(struct gk20a *g,
         struct gr_gk20a *gr = &g->gr;
         int ret;
 
-        mutex_lock(&gr->fecs_mutex);
+        nvgpu_mutex_acquire(&gr->fecs_mutex);
 
         if (op.mailbox.id != 0)
                 gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(op.mailbox.id),
@@ -561,7 +561,7 @@ int gr_gk20a_submit_fecs_method_op(struct gk20a *g,
                         op.cond.fail, op.mailbox.fail,
                         sleepduringwait);
 
-        mutex_unlock(&gr->fecs_mutex);
+        nvgpu_mutex_release(&gr->fecs_mutex);
 
         return ret;
 }
@@ -573,7 +573,7 @@ int gr_gk20a_submit_fecs_sideband_method_op(struct gk20a *g,
         struct gr_gk20a *gr = &g->gr;
         int ret;
 
-        mutex_lock(&gr->fecs_mutex);
+        nvgpu_mutex_acquire(&gr->fecs_mutex);
 
         gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(op.mailbox.id),
                         gr_fecs_ctxsw_mailbox_clear_value_f(op.mailbox.clr));
@@ -587,7 +587,7 @@ int gr_gk20a_submit_fecs_sideband_method_op(struct gk20a *g,
                         op.cond.fail, op.mailbox.fail,
                         false);
 
-        mutex_unlock(&gr->fecs_mutex);
+        nvgpu_mutex_release(&gr->fecs_mutex);
 
         return ret;
 }
@@ -1596,7 +1596,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
         /* golden ctx is global to all channels. Although only the first
            channel initializes golden image, driver needs to prevent multiple
            channels from initializing golden ctx at the same time */
-        mutex_lock(&gr->ctx_mutex);
+        nvgpu_mutex_acquire(&gr->ctx_mutex);
 
         if (gr->ctx_vars.golden_image_initialized) {
                 goto clean_up;
@@ -1825,7 +1825,7 @@ clean_up:
         gk20a_mem_end(g, gold_mem);
         gk20a_mem_end(g, gr_mem);
 
-        mutex_unlock(&gr->ctx_mutex);
+        nvgpu_mutex_release(&gr->ctx_mutex);
         return err;
 }
 
@@ -3327,7 +3327,7 @@ out:
 int gk20a_comptag_allocator_init(struct gk20a_comptag_allocator *allocator,
                 unsigned long size)
 {
-        mutex_init(&allocator->lock);
+        nvgpu_mutex_init(&allocator->lock);
         /*
          * 0th comptag is special and is never used. The base for this bitmap
          * is 1, and its size is one less than the size of comptag store.
@@ -4064,7 +4064,7 @@ int gr_gk20a_add_zbc(struct gk20a *g, struct gr_gk20a *gr,
 
         /* no endian swap ? */
 
-        mutex_lock(&gr->zbc_lock);
+        nvgpu_mutex_acquire(&gr->zbc_lock);
         switch (zbc_val->type) {
         case GK20A_ZBC_TYPE_COLOR:
                 /* search existing tables */
@@ -4159,7 +4159,7 @@ int gr_gk20a_add_zbc(struct gk20a *g, struct gr_gk20a *gr,
         }
 
 err_mutex:
-        mutex_unlock(&gr->zbc_lock);
+        nvgpu_mutex_release(&gr->zbc_lock);
         return ret;
 }
 
@@ -4267,7 +4267,7 @@ int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr)
         struct zbc_entry zbc_val;
         u32 i, err;
 
-        mutex_init(&gr->zbc_lock);
+        nvgpu_mutex_init(&gr->zbc_lock);
 
         /* load default color table */
         zbc_val.type = GK20A_ZBC_TYPE_COLOR;
@@ -5136,7 +5136,7 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g)
         gr->g = g;
 
 #if defined(CONFIG_GK20A_CYCLE_STATS)
-        mutex_init(&g->gr.cs_lock);
+        nvgpu_mutex_init(&g->gr.cs_lock);
 #endif
 
         err = gr_gk20a_init_gr_config(g, gr);
@@ -5172,8 +5172,8 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g)
 
         gr_gk20a_load_zbc_default_table(g, gr);
 
-        mutex_init(&gr->ctx_mutex);
-        spin_lock_init(&gr->ch_tlb_lock);
+        nvgpu_mutex_init(&gr->ctx_mutex);
+        nvgpu_spinlock_init(&gr->ch_tlb_lock);
 
         gr->remove_support = gk20a_remove_gr_support;
         gr->sw_ready = true;
@@ -5244,7 +5244,7 @@ int gk20a_init_gr_support(struct gk20a *g)
         gk20a_dbg_fn("");
 
         /* this is required before gr_gk20a_init_ctx_state */
-        mutex_init(&g->gr.fecs_mutex);
+        nvgpu_mutex_init(&g->gr.fecs_mutex);
 
         err = gr_gk20a_init_ctxsw(g);
         if (err)
@@ -5468,7 +5468,7 @@ int gk20a_gr_reset(struct gk20a *g)
         int err;
         u32 size;
 
-        mutex_lock(&g->gr.fecs_mutex);
+        nvgpu_mutex_acquire(&g->gr.fecs_mutex);
 
         err = gk20a_enable_gr_hw(g);
         if (err)
@@ -5482,7 +5482,7 @@ int gk20a_gr_reset(struct gk20a *g)
         if (err)
                 return err;
 
-        mutex_unlock(&g->gr.fecs_mutex);
+        nvgpu_mutex_release(&g->gr.fecs_mutex);
 
         /* this appears query for sw states but fecs actually init
            ramchain, etc so this is hw init */
@@ -5731,7 +5731,7 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g,
         if ((ch->cyclestate.cyclestate_buffer == NULL) || (isr_data->data_lo == 0))
                 return 0;
 
-        mutex_lock(&ch->cyclestate.cyclestate_buffer_mutex);
+        nvgpu_mutex_acquire(&ch->cyclestate.cyclestate_buffer_mutex);
 
         virtual_address = ch->cyclestate.cyclestate_buffer;
         buffer_size = ch->cyclestate.cyclestate_buffer_size;
@@ -5843,7 +5843,7 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g,
                 sh_hdr->completed = true;
                 offset += sh_hdr->size;
         }
-        mutex_unlock(&ch->cyclestate.cyclestate_buffer_mutex);
+        nvgpu_mutex_release(&ch->cyclestate.cyclestate_buffer_mutex);
 #endif
         gk20a_dbg_fn("");
         wake_up(&ch->notifier_wq);
@@ -5874,7 +5874,7 @@ static struct channel_gk20a *gk20a_gr_get_channel_from_ctx(
         if (!gr_fecs_current_ctx_valid_v(curr_ctx))
                 return NULL;
 
-        spin_lock(&gr->ch_tlb_lock);
+        nvgpu_spinlock_acquire(&gr->ch_tlb_lock);
 
         /* check cache first */
         for (i = 0; i < GR_CHANNEL_MAP_TLB_SIZE; i++) {
@@ -5926,7 +5926,7 @@ static struct channel_gk20a *gk20a_gr_get_channel_from_ctx(
                 (GR_CHANNEL_MAP_TLB_SIZE - 1);
 
 unlock:
-        spin_unlock(&gr->ch_tlb_lock);
+        nvgpu_spinlock_release(&gr->ch_tlb_lock);
         if (curr_tsgid)
                 *curr_tsgid = tsgid;
         return ret;
@@ -5998,7 +5998,7 @@ static int gk20a_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc)
                         GPU_LIT_TPC_IN_GPC_STRIDE);
         u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc;
 
-        mutex_lock(&g->dbg_sessions_lock);
+        nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 
         sm_id = gr_gpc0_tpc0_sm_cfg_sm_id_v(gk20a_readl(g,
                         gr_gpc0_tpc0_sm_cfg_r() + offset));
@@ -6012,7 +6012,7 @@ static int gk20a_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc)
         gr->sm_error_states[sm_id].hww_warp_esr_report_mask = gk20a_readl(g,
                         gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_r() + offset);
 
-        mutex_unlock(&g->dbg_sessions_lock);
+        nvgpu_mutex_release(&g->dbg_sessions_lock);
 
         return 0;
 }
@@ -6029,7 +6029,7 @@ static int gk20a_gr_update_sm_error_state(struct gk20a *g,
                         GPU_LIT_TPC_IN_GPC_STRIDE);
         int err = 0;
 
-        mutex_lock(&g->dbg_sessions_lock);
+        nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 
         gr->sm_error_states[sm_id].hww_global_esr =
                         sm_error_state->hww_global_esr;
@@ -6081,7 +6081,7 @@ enable_ctxsw:
         err = gr_gk20a_enable_ctxsw(g);
 
 fail:
-        mutex_unlock(&g->dbg_sessions_lock);
+        nvgpu_mutex_release(&g->dbg_sessions_lock);
         return err;
 }
 
@@ -6096,7 +6096,7 @@ static int gk20a_gr_clear_sm_error_state(struct gk20a *g,
                         GPU_LIT_TPC_IN_GPC_STRIDE);
         int err = 0;
 
-        mutex_lock(&g->dbg_sessions_lock);
+        nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 
         memset(&gr->sm_error_states[sm_id], 0, sizeof(*gr->sm_error_states));
 
@@ -6122,7 +6122,7 @@ static int gk20a_gr_clear_sm_error_state(struct gk20a *g,
         err = gr_gk20a_enable_ctxsw(g);
 
 fail:
-        mutex_unlock(&g->dbg_sessions_lock);
+        nvgpu_mutex_release(&g->dbg_sessions_lock);
         return err;
 }
 
@@ -9128,7 +9128,7 @@ int gr_gk20a_suspend_contexts(struct gk20a *g,
         struct dbg_session_channel_data *ch_data;
         int err = 0;
 
-        mutex_lock(&g->dbg_sessions_lock);
+        nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 
         err = gr_gk20a_disable_ctxsw(g);
         if (err) {
@@ -9136,7 +9136,7 @@ int gr_gk20a_suspend_contexts(struct gk20a *g,
                 goto clean_up;
         }
 
-        mutex_lock(&dbg_s->ch_list_lock);
+        nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
 
         list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry) {
                 ch = g->fifo.channel + ch_data->chid;
@@ -9146,7 +9146,7 @@ int gr_gk20a_suspend_contexts(struct gk20a *g,
                         local_ctx_resident_ch_fd = ch_data->channel_fd;
         }
 
-        mutex_unlock(&dbg_s->ch_list_lock);
+        nvgpu_mutex_release(&dbg_s->ch_list_lock);
 
         err = gr_gk20a_enable_ctxsw(g);
         if (err)
@@ -9155,7 +9155,7 @@ int gr_gk20a_suspend_contexts(struct gk20a *g,
         *ctx_resident_ch_fd = local_ctx_resident_ch_fd;
 
 clean_up:
-        mutex_unlock(&g->dbg_sessions_lock);
+        nvgpu_mutex_release(&g->dbg_sessions_lock);
 
         return err;
 }
@@ -9170,7 +9170,7 @@ int gr_gk20a_resume_contexts(struct gk20a *g,
         int err = 0;
         struct dbg_session_channel_data *ch_data;
 
-        mutex_lock(&g->dbg_sessions_lock);
+        nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 
         err = gr_gk20a_disable_ctxsw(g);
         if (err) {
@@ -9193,7 +9193,7 @@ int gr_gk20a_resume_contexts(struct gk20a *g,
         *ctx_resident_ch_fd = local_ctx_resident_ch_fd;
 
 clean_up:
-        mutex_unlock(&g->dbg_sessions_lock);
+        nvgpu_mutex_release(&g->dbg_sessions_lock);
 
         return err;
 }