author     Deepak Nibade <dnibade@nvidia.com>                   2017-01-24 08:30:42 -0500
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2017-02-22 07:15:02 -0500
commit     8ee3aa4b3175d8d27e57a0f5d5e2cdf3d78a4a58 (patch)
tree       505dfd2ea2aca2f1cbdb254baee980862d21e04d /drivers/gpu/nvgpu/clk
parent     1f855af63fdd31fe3dcfee75f4f5f9b62f30d87e (diff)
gpu: nvgpu: use common nvgpu mutex/spinlock APIs
Instead of using the Linux APIs for mutexes and spinlocks directly, use the
new APIs defined in <nvgpu/lock.h>.

Replace the Linux-specific mutex/spinlock declaration, init, lock, and unlock
APIs with the new APIs, e.g. struct mutex is replaced by struct nvgpu_mutex
and mutex_lock() is replaced by nvgpu_mutex_acquire(). Also include
<nvgpu/lock.h> instead of <linux/mutex.h> and <linux/spinlock.h>.

Add explicit nvgpu/lock.h includes to the files below to fix compilation
failures:
  gk20a/platform_gk20a.h
  include/nvgpu/allocator.h

Jira NVGPU-13

Change-Id: I81a05d21ecdbd90c2076a9f0aefd0e40b215bd33
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1293187
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
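For reference, the conversion pattern applied throughout the files below looks
roughly like this sketch. The struct and function names (demo_state, demo_init,
demo_update) are made up for illustration; only the <nvgpu/lock.h> include and
the nvgpu_* lock types and calls come from this change.

    /* Illustrative only: shows the old Linux API next to the new nvgpu API. */
    #include <nvgpu/lock.h>

    struct demo_state {
    	struct nvgpu_mutex cfg_lock;      /* was: struct mutex */
    	struct nvgpu_spinlock list_lock;  /* was: spinlock_t */
    };

    static void demo_init(struct demo_state *s)
    {
    	nvgpu_mutex_init(&s->cfg_lock);      /* was: mutex_init() */
    	nvgpu_spinlock_init(&s->list_lock);  /* was: spin_lock_init() */
    }

    static void demo_update(struct demo_state *s)
    {
    	nvgpu_mutex_acquire(&s->cfg_lock);   /* was: mutex_lock() */
    	/* ... sleepable critical section ... */
    	nvgpu_mutex_release(&s->cfg_lock);   /* was: mutex_unlock() */

    	nvgpu_spinlock_acquire(&s->list_lock);  /* was: spin_lock() */
    	/* ... short, non-sleeping critical section ... */
    	nvgpu_spinlock_release(&s->list_lock);  /* was: spin_unlock() */
    }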
Diffstat (limited to 'drivers/gpu/nvgpu/clk')
-rw-r--r--  drivers/gpu/nvgpu/clk/clk_arb.c   | 50
-rw-r--r--  drivers/gpu/nvgpu/clk/clk_mclk.c  | 16
-rw-r--r--  drivers/gpu/nvgpu/clk/clk_mclk.h  |  8
3 files changed, 37 insertions(+), 37 deletions(-)
diff --git a/drivers/gpu/nvgpu/clk/clk_arb.c b/drivers/gpu/nvgpu/clk/clk_arb.c
index 9232c3dc..062e4e2b 100644
--- a/drivers/gpu/nvgpu/clk/clk_arb.c
+++ b/drivers/gpu/nvgpu/clk/clk_arb.c
@@ -18,7 +18,7 @@
 #include <linux/anon_inodes.h>
 #include <linux/nvgpu.h>
 #include <linux/bitops.h>
-#include <linux/spinlock.h>
+#include <nvgpu/lock.h>
 #include <linux/rculist.h>
 #include <linux/llist.h>
 #include "clk/clk_arb.h"
@@ -139,10 +139,10 @@ struct nvgpu_clk_arb_target {
 };
 
 struct nvgpu_clk_arb {
-	spinlock_t sessions_lock;
-	spinlock_t users_lock;
+	struct nvgpu_spinlock sessions_lock;
+	struct nvgpu_spinlock users_lock;
 
-	struct mutex pstate_lock;
+	struct nvgpu_mutex pstate_lock;
 	struct list_head users;
 	struct list_head sessions;
 	struct llist_head requests;
@@ -308,9 +308,9 @@ int nvgpu_clk_arb_init_arbiter(struct gk20a *g)
 	g->clk_arb = arb;
 	arb->g = g;
 
-	mutex_init(&arb->pstate_lock);
-	spin_lock_init(&arb->sessions_lock);
-	spin_lock_init(&arb->users_lock);
+	nvgpu_mutex_init(&arb->pstate_lock);
+	nvgpu_spinlock_init(&arb->sessions_lock);
+	nvgpu_spinlock_init(&arb->users_lock);
 
 	err = g->ops.clk_arb.get_arbiter_clk_default(g,
 			CTRL_CLK_DOMAIN_MCLK, &default_mhz);
@@ -546,9 +546,9 @@ int nvgpu_clk_arb_init_session(struct gk20a *g,
 
 	init_llist_head(&session->targets);
 
-	spin_lock(&arb->sessions_lock);
+	nvgpu_spinlock_acquire(&arb->sessions_lock);
 	list_add_tail_rcu(&session->link, &arb->sessions);
-	spin_unlock(&arb->sessions_lock);
+	nvgpu_spinlock_release(&arb->sessions_lock);
 
 	*_session = session;
 
@@ -573,9 +573,9 @@ static void nvgpu_clk_arb_free_session(struct kref *refcount)
 
 	gk20a_dbg_fn("");
 
-	spin_lock(&arb->sessions_lock);
+	nvgpu_spinlock_acquire(&arb->sessions_lock);
 	list_del_rcu(&session->link);
-	spin_unlock(&arb->sessions_lock);
+	nvgpu_spinlock_release(&arb->sessions_lock);
 
 	head = llist_del_all(&session->targets);
 	llist_for_each_entry_safe(dev, tmp, head, node) {
@@ -622,9 +622,9 @@ int nvgpu_clk_arb_install_event_fd(struct gk20a *g,
 
 	dev->arb_queue_head = atomic_read(&arb->notification_queue.head);
 
-	spin_lock(&arb->users_lock);
+	nvgpu_spinlock_acquire(&arb->users_lock);
 	list_add_tail_rcu(&dev->link, &arb->users);
-	spin_unlock(&arb->users_lock);
+	nvgpu_spinlock_release(&arb->users_lock);
 
 	*event_fd = fd;
 
@@ -1128,13 +1128,13 @@ static void nvgpu_clk_arb_run_arbiter_cb(struct work_struct *work)
 	/* Program clocks */
 	/* A change in both mclk of gpc2clk may require a change in voltage */
 
-	mutex_lock(&arb->pstate_lock);
+	nvgpu_mutex_acquire(&arb->pstate_lock);
 	status = nvgpu_lpwr_disable_pg(g, false);
 
 	status = clk_pmu_freq_controller_load(g, false);
 	if (status < 0) {
 		arb->status = status;
-		mutex_unlock(&arb->pstate_lock);
+		nvgpu_mutex_release(&arb->pstate_lock);
 
 		/* make status visible */
 		smp_mb();
@@ -1143,7 +1143,7 @@ static void nvgpu_clk_arb_run_arbiter_cb(struct work_struct *work)
 	status = volt_set_noiseaware_vmin(g, nuvmin, nuvmin_sram);
 	if (status < 0) {
 		arb->status = status;
-		mutex_unlock(&arb->pstate_lock);
+		nvgpu_mutex_release(&arb->pstate_lock);
 
 		/* make status visible */
 		smp_mb();
@@ -1155,7 +1155,7 @@ static void nvgpu_clk_arb_run_arbiter_cb(struct work_struct *work)
 			voltuv_sram);
 	if (status < 0) {
 		arb->status = status;
-		mutex_unlock(&arb->pstate_lock);
+		nvgpu_mutex_release(&arb->pstate_lock);
 
 		/* make status visible */
 		smp_mb();
@@ -1165,7 +1165,7 @@ static void nvgpu_clk_arb_run_arbiter_cb(struct work_struct *work)
 	status = clk_pmu_freq_controller_load(g, true);
 	if (status < 0) {
 		arb->status = status;
-		mutex_unlock(&arb->pstate_lock);
+		nvgpu_mutex_release(&arb->pstate_lock);
 
 		/* make status visible */
 		smp_mb();
@@ -1175,7 +1175,7 @@ static void nvgpu_clk_arb_run_arbiter_cb(struct work_struct *work)
 	status = nvgpu_lwpr_mclk_change(g, pstate);
 	if (status < 0) {
 		arb->status = status;
-		mutex_unlock(&arb->pstate_lock);
+		nvgpu_mutex_release(&arb->pstate_lock);
 
 		/* make status visible */
 		smp_mb();
@@ -1200,7 +1200,7 @@ static void nvgpu_clk_arb_run_arbiter_cb(struct work_struct *work)
 	status = nvgpu_lpwr_enable_pg(g, false);
 	if (status < 0) {
 		arb->status = status;
-		mutex_unlock(&arb->pstate_lock);
+		nvgpu_mutex_release(&arb->pstate_lock);
 
 		/* make status visible */
 		smp_mb();
@@ -1212,7 +1212,7 @@ static void nvgpu_clk_arb_run_arbiter_cb(struct work_struct *work)
 	atomic_inc(&arb->req_nr);
 
 	/* Unlock pstate change for PG */
-	mutex_unlock(&arb->pstate_lock);
+	nvgpu_mutex_release(&arb->pstate_lock);
 
 	/* VF Update complete */
 	nvgpu_clk_arb_set_global_alarm(g, EVENT(VF_UPDATE));
@@ -1589,9 +1589,9 @@ static int nvgpu_clk_arb_release_event_dev(struct inode *inode,
 
 	gk20a_dbg_fn("");
 
-	spin_lock(&arb->users_lock);
+	nvgpu_spinlock_acquire(&arb->users_lock);
 	list_del_rcu(&dev->link);
-	spin_unlock(&arb->users_lock);
+	nvgpu_spinlock_release(&arb->users_lock);
 
 	synchronize_rcu();
 	kref_put(&session->refcount, nvgpu_clk_arb_free_session);
@@ -2000,9 +2000,9 @@ void nvgpu_clk_arb_pstate_change_lock(struct gk20a *g, bool lock)
 	struct nvgpu_clk_arb *arb = g->clk_arb;
 
 	if (lock)
-		mutex_lock(&arb->pstate_lock);
+		nvgpu_mutex_acquire(&arb->pstate_lock);
 	else
-		mutex_unlock(&arb->pstate_lock);
+		nvgpu_mutex_release(&arb->pstate_lock);
 }
 
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/nvgpu/clk/clk_mclk.c b/drivers/gpu/nvgpu/clk/clk_mclk.c
index 815f55ba..c2e9b35c 100644
--- a/drivers/gpu/nvgpu/clk/clk_mclk.c
+++ b/drivers/gpu/nvgpu/clk/clk_mclk.c
@@ -2185,8 +2185,8 @@ int clk_mclkseq_init_mclk_gddr5(struct gk20a *g)
 
 	mclk = &g->clk_pmu.clk_mclk;
 
-	mutex_init(&mclk->mclk_lock);
-	mutex_init(&mclk->data_lock);
+	nvgpu_mutex_init(&mclk->mclk_lock);
+	nvgpu_mutex_init(&mclk->data_lock);
 
 	/* FBPA gain WAR */
 	gk20a_writel(g, fb_fbpa_fbio_iref_byte_rx_ctrl_r(), 0x22222222);
@@ -2257,7 +2257,7 @@ int clk_mclkseq_change_mclk_gddr5(struct gk20a *g, u16 val)
 
 	mclk = &g->clk_pmu.clk_mclk;
 
-	mutex_lock(&mclk->mclk_lock);
+	nvgpu_mutex_acquire(&mclk->mclk_lock);
 
 	if (!mclk->init)
 		goto exit_status;
@@ -2364,7 +2364,7 @@ int clk_mclkseq_change_mclk_gddr5(struct gk20a *g, u16 val)
 #ifdef CONFIG_DEBUG_FS
 	g->ops.read_ptimer(g, &t1);
 
-	mutex_lock(&mclk->data_lock);
+	nvgpu_mutex_acquire(&mclk->data_lock);
 	mclk->switch_num++;
 
 	if (mclk->switch_num == 1) {
@@ -2387,11 +2387,11 @@ int clk_mclkseq_change_mclk_gddr5(struct gk20a *g, u16 val)
 		mclk->switch_std +=
 			(curr - mclk->switch_avg) * (curr - prev_avg);
 	}
-	mutex_unlock(&mclk->data_lock);
+	nvgpu_mutex_release(&mclk->data_lock);
 #endif
 exit_status:
 
-	mutex_unlock(&mclk->mclk_lock);
+	nvgpu_mutex_release(&mclk->mclk_lock);
 	return status;
 }
 
@@ -2429,13 +2429,13 @@ static int mclk_switch_stats_show(struct seq_file *s, void *unused)
 	mclk = &g->clk_pmu.clk_mclk;
 
 	/* Make copy of structure to reduce time with lock held */
-	mutex_lock(&mclk->data_lock);
+	nvgpu_mutex_acquire(&mclk->data_lock);
 	std = mclk->switch_std;
 	avg = mclk->switch_avg;
 	max = mclk->switch_max;
 	min = mclk->switch_min;
 	num = mclk->switch_num;
-	mutex_unlock(&mclk->data_lock);
+	nvgpu_mutex_release(&mclk->data_lock);
 
 	tmp = std;
 	do_div(tmp, num);
diff --git a/drivers/gpu/nvgpu/clk/clk_mclk.h b/drivers/gpu/nvgpu/clk/clk_mclk.h
index cb7f0de0..731f289d 100644
--- a/drivers/gpu/nvgpu/clk/clk_mclk.h
+++ b/drivers/gpu/nvgpu/clk/clk_mclk.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -14,7 +14,7 @@
 #ifndef _CLKMCLK_H_
 #define _CLKMCLK_H_
 
-#include <linux/mutex.h>
+#include <nvgpu/lock.h>
 
 enum gk20a_mclk_speed {
 	gk20a_mclk_low_speed,
@@ -24,8 +24,8 @@ enum gk20a_mclk_speed {
 
 struct clk_mclk_state {
 	enum gk20a_mclk_speed speed;
-	struct mutex mclk_lock;
-	struct mutex data_lock;
+	struct nvgpu_mutex mclk_lock;
+	struct nvgpu_mutex data_lock;
 
 	u16 p5_min;
 	u16 p0_min;