author		Deepak Nibade <dnibade@nvidia.com>	2017-01-24 08:30:42 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-02-22 07:15:02 -0500
commit		8ee3aa4b3175d8d27e57a0f5d5e2cdf3d78a4a58 (patch)
tree		505dfd2ea2aca2f1cbdb254baee980862d21e04d /drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
parent		1f855af63fdd31fe3dcfee75f4f5f9b62f30d87e (diff)
gpu: nvgpu: use common nvgpu mutex/spinlock APIs
Instead of using Linux APIs for mutex and spinlocks directly, use the
new APIs defined in <nvgpu/lock.h>.

Replace the Linux-specific mutex/spinlock declaration, init, lock, and
unlock APIs with the new APIs, e.g. struct mutex is replaced by
struct nvgpu_mutex and mutex_lock() is replaced by
nvgpu_mutex_acquire().

Also include <nvgpu/lock.h> instead of <linux/mutex.h> and
<linux/spinlock.h>.

Add explicit nvgpu/lock.h includes to the files below to fix
compilation failures:

gk20a/platform_gk20a.h
include/nvgpu/allocator.h

Jira NVGPU-13

Change-Id: I81a05d21ecdbd90c2076a9f0aefd0e40b215bd33
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1293187
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
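[Editor's note] The mapping the commit message describes is a pure renaming at
the call sites. The sketch below shows what the Linux side of <nvgpu/lock.h>
plausibly looks like, assuming struct nvgpu_mutex is a thin wrapper around the
kernel's struct mutex; the wrapper layout and inline definitions here are
illustrative, not taken from the actual header, and the matching
struct nvgpu_spinlock implied by the commit message is omitted:

	/*
	 * Illustrative sketch only; the real <nvgpu/lock.h> may differ.
	 * Assumes the Linux build backs the new type with a kernel mutex.
	 */
	#include <linux/mutex.h>

	struct nvgpu_mutex {
		struct mutex mutex;	/* assumed wrapper layout */
	};

	static inline void nvgpu_mutex_init(struct nvgpu_mutex *mutex)
	{
		mutex_init(&mutex->mutex);
	}

	static inline void nvgpu_mutex_acquire(struct nvgpu_mutex *mutex)
	{
		mutex_lock(&mutex->mutex);
	}

	static inline void nvgpu_mutex_release(struct nvgpu_mutex *mutex)
	{
		mutex_unlock(&mutex->mutex);
	}

Routing every lock through wrappers like these lets each OS port back the
primitive differently without touching common driver code, which is the point
of the abstraction.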
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/pmu_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/pmu_gk20a.c	54
1 file changed, 27 insertions, 27 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 6227d523..4ea9b911 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -1414,11 +1414,11 @@ int gk20a_init_pmu(struct pmu_gk20a *pmu)
 	struct gk20a *g = gk20a_from_pmu(pmu);
 	struct pmu_v *pv = &g->ops.pmu_ver;
 
-	mutex_init(&pmu->elpg_mutex);
-	mutex_init(&pmu->pg_mutex);
-	mutex_init(&pmu->isr_mutex);
-	mutex_init(&pmu->pmu_copy_lock);
-	mutex_init(&pmu->pmu_seq_lock);
+	nvgpu_mutex_init(&pmu->elpg_mutex);
+	nvgpu_mutex_init(&pmu->pg_mutex);
+	nvgpu_mutex_init(&pmu->isr_mutex);
+	nvgpu_mutex_init(&pmu->pmu_copy_lock);
+	nvgpu_mutex_init(&pmu->pmu_seq_lock);
 
 	pmu->remove_support = gk20a_remove_pmu_support;
 
@@ -2189,7 +2189,7 @@ void pmu_copy_from_dmem(struct pmu_gk20a *pmu,
 		return;
 	}
 
-	mutex_lock(&pmu->pmu_copy_lock);
+	nvgpu_mutex_acquire(&pmu->pmu_copy_lock);
 
 	words = size >> 2;
 	bytes = size & 0x3;
@@ -2211,7 +2211,7 @@ void pmu_copy_from_dmem(struct pmu_gk20a *pmu,
 			dst[(words << 2) + i] = ((u8 *)&data)[i];
 		}
 	}
-	mutex_unlock(&pmu->pmu_copy_lock);
+	nvgpu_mutex_release(&pmu->pmu_copy_lock);
 	return;
 }
 
@@ -2235,7 +2235,7 @@ void pmu_copy_to_dmem(struct pmu_gk20a *pmu,
 		return;
 	}
 
-	mutex_lock(&pmu->pmu_copy_lock);
+	nvgpu_mutex_acquire(&pmu->pmu_copy_lock);
 
 	words = size >> 2;
 	bytes = size & 0x3;
@@ -2265,7 +2265,7 @@ void pmu_copy_to_dmem(struct pmu_gk20a *pmu,
2265 "copy failed. bytes written %d, expected %d", 2265 "copy failed. bytes written %d, expected %d",
2266 data - dst, size); 2266 data - dst, size);
2267 } 2267 }
2268 mutex_unlock(&pmu->pmu_copy_lock); 2268 nvgpu_mutex_release(&pmu->pmu_copy_lock);
2269 return; 2269 return;
2270} 2270}
2271 2271
@@ -2571,17 +2571,17 @@ static int pmu_seq_acquire(struct pmu_gk20a *pmu,
 	struct pmu_sequence *seq;
 	u32 index;
 
-	mutex_lock(&pmu->pmu_seq_lock);
+	nvgpu_mutex_acquire(&pmu->pmu_seq_lock);
 	index = find_first_zero_bit(pmu->pmu_seq_tbl,
 			sizeof(pmu->pmu_seq_tbl));
 	if (index >= sizeof(pmu->pmu_seq_tbl)) {
 		gk20a_err(dev_from_gk20a(g),
 			"no free sequence available");
-		mutex_unlock(&pmu->pmu_seq_lock);
+		nvgpu_mutex_release(&pmu->pmu_seq_lock);
 		return -EAGAIN;
 	}
 	set_bit(index, pmu->pmu_seq_tbl);
-	mutex_unlock(&pmu->pmu_seq_lock);
+	nvgpu_mutex_release(&pmu->pmu_seq_lock);
 
 	seq = &pmu->seq[index];
 	seq->state = PMU_SEQ_STATE_PENDING;
@@ -2616,7 +2616,7 @@ static int pmu_queue_init(struct pmu_gk20a *pmu,
 	queue->id = id;
 	g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params(queue, id, init);
 	queue->mutex_id = id;
-	mutex_init(&queue->mutex);
+	nvgpu_mutex_init(&queue->mutex);
 
 	gk20a_dbg_pmu("queue %d: index %d, offset 0x%08x, size 0x%08x",
 		id, queue->index, queue->offset, queue->size);
@@ -2831,7 +2831,7 @@ static int pmu_queue_lock(struct pmu_gk20a *pmu,
 		return 0;
 
 	if (PMU_IS_SW_COMMAND_QUEUE(queue->id)) {
-		mutex_lock(&queue->mutex);
+		nvgpu_mutex_acquire(&queue->mutex);
 		return 0;
 	}
 
@@ -2848,7 +2848,7 @@ static int pmu_queue_unlock(struct pmu_gk20a *pmu,
 		return 0;
 
 	if (PMU_IS_SW_COMMAND_QUEUE(queue->id)) {
-		mutex_unlock(&queue->mutex);
+		nvgpu_mutex_release(&queue->mutex);
 		return 0;
 	}
 
@@ -3245,10 +3245,10 @@ static int gk20a_init_pmu_setup_hw1(struct gk20a *g)
 
 	gk20a_dbg_fn("");
 
-	mutex_lock(&pmu->isr_mutex);
+	nvgpu_mutex_acquire(&pmu->isr_mutex);
 	pmu_reset(pmu);
 	pmu->isr_enabled = true;
-	mutex_unlock(&pmu->isr_mutex);
+	nvgpu_mutex_release(&pmu->isr_mutex);
 
 	/* setup apertures - virtual */
 	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_UCODE),
@@ -4530,9 +4530,9 @@ void gk20a_pmu_isr(struct gk20a *g)
 
 	gk20a_dbg_fn("");
 
-	mutex_lock(&pmu->isr_mutex);
+	nvgpu_mutex_acquire(&pmu->isr_mutex);
 	if (!pmu->isr_enabled) {
-		mutex_unlock(&pmu->isr_mutex);
+		nvgpu_mutex_release(&pmu->isr_mutex);
 		return;
 	}
 
@@ -4546,7 +4546,7 @@ void gk20a_pmu_isr(struct gk20a *g)
 	intr = gk20a_readl(g, pwr_falcon_irqstat_r()) & mask;
 	if (!intr || pmu->pmu_state == PMU_STATE_OFF) {
 		gk20a_writel(g, pwr_falcon_irqsclr_r(), intr);
-		mutex_unlock(&pmu->isr_mutex);
+		nvgpu_mutex_release(&pmu->isr_mutex);
 		return;
 	}
 
@@ -4583,7 +4583,7 @@ void gk20a_pmu_isr(struct gk20a *g)
 			pwr_falcon_irqsset_swgen0_set_f());
 	}
 
-	mutex_unlock(&pmu->isr_mutex);
+	nvgpu_mutex_release(&pmu->isr_mutex);
 }
 
 static bool pmu_validate_cmd(struct pmu_gk20a *pmu, struct pmu_cmd *cmd,
@@ -4987,7 +4987,7 @@ int gk20a_pmu_enable_elpg(struct gk20a *g)
 	if (!support_gk20a_pmu(g->dev))
 		return ret;
 
-	mutex_lock(&pmu->elpg_mutex);
+	nvgpu_mutex_acquire(&pmu->elpg_mutex);
 
 	pmu->elpg_refcnt++;
 	if (pmu->elpg_refcnt <= 0)
@@ -5026,7 +5026,7 @@ int gk20a_pmu_enable_elpg(struct gk20a *g)
 	}
 
 exit_unlock:
-	mutex_unlock(&pmu->elpg_mutex);
+	nvgpu_mutex_release(&pmu->elpg_mutex);
 	gk20a_dbg_fn("done");
 	return ret;
 }
@@ -5049,7 +5049,7 @@ int gk20a_pmu_disable_elpg(struct gk20a *g)
 	if (!support_gk20a_pmu(g->dev))
 		return ret;
 
-	mutex_lock(&pmu->elpg_mutex);
+	nvgpu_mutex_acquire(&pmu->elpg_mutex);
 
 	pmu->elpg_refcnt--;
 	if (pmu->elpg_refcnt > 0) {
@@ -5138,7 +5138,7 @@ int gk20a_pmu_disable_elpg(struct gk20a *g)
 
 exit_reschedule:
 exit_unlock:
-	mutex_unlock(&pmu->elpg_mutex);
+	nvgpu_mutex_release(&pmu->elpg_mutex);
 	gk20a_dbg_fn("done");
 	return ret;
 }
@@ -5182,9 +5182,9 @@ int gk20a_pmu_destroy(struct gk20a *g)
 	g->pg_ungating_time_us += (u64)pg_stat_data.ungating_time;
 	g->pg_gating_cnt += pg_stat_data.gating_cnt;
 
-	mutex_lock(&pmu->isr_mutex);
+	nvgpu_mutex_acquire(&pmu->isr_mutex);
 	pmu->isr_enabled = false;
-	mutex_unlock(&pmu->isr_mutex);
+	nvgpu_mutex_release(&pmu->isr_mutex);
 
 	pmu->pmu_state = PMU_STATE_OFF;
 	pmu->pmu_ready = false;