Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/clk_arb.c'):
 drivers/gpu/nvgpu/common/linux/clk_arb.c (-rw-r--r--) | 44
 1 file changed, 21 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/clk_arb.c b/drivers/gpu/nvgpu/common/linux/clk_arb.c
index ec40a6ce..bb0fd628 100644
--- a/drivers/gpu/nvgpu/common/linux/clk_arb.c
+++ b/drivers/gpu/nvgpu/common/linux/clk_arb.c
@@ -14,12 +14,6 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/cdev.h>
-#include <linux/file.h>
-#include <linux/list.h>
-#include <linux/anon_inodes.h>
-#include <linux/uaccess.h>
-
 #include <nvgpu/bitops.h>
 #include <nvgpu/lock.h>
 #include <nvgpu/kmem.h>
@@ -29,6 +23,7 @@
 #include <nvgpu/log.h>
 #include <nvgpu/barrier.h>
 #include <nvgpu/cond.h>
+#include <nvgpu/list.h>
 #include <nvgpu/clk_arb.h>
 
 #include "gk20a/gk20a.h"
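
The hunks above drop the direct <linux/list.h> (and other Linux-only header) dependencies and pull in <nvgpu/list.h>, nvgpu's OS-abstraction wrapper around an intrusive doubly linked list. Below is a minimal sketch of the primitives the rest of the patch relies on, assuming the usual circular next/prev layout; the authoritative definitions live in <nvgpu/list.h>, and the field names and bodies here are illustrative assumptions only.

/*
 * Hypothetical sketch of the nvgpu list primitives this patch switches to.
 * The real definitions are in <nvgpu/list.h>; the layout below only
 * illustrates the circular, intrusive doubly-linked structure mirrored
 * from <linux/list.h>.
 */
struct nvgpu_list_node {
	struct nvgpu_list_node *prev;
	struct nvgpu_list_node *next;
};

static inline void nvgpu_init_list_node(struct nvgpu_list_node *node)
{
	/* An empty list head is a node that points at itself. */
	node->prev = node;
	node->next = node;
}

static inline void nvgpu_list_add(struct nvgpu_list_node *new_node,
				  struct nvgpu_list_node *head)
{
	/* Link right after the head (newest entry first). */
	new_node->prev = head;
	new_node->next = head->next;
	head->next->prev = new_node;
	head->next = new_node;
}

static inline void nvgpu_list_add_tail(struct nvgpu_list_node *new_node,
				       struct nvgpu_list_node *head)
{
	/* Link right before the head, i.e. at the end of the list. */
	new_node->prev = head->prev;
	new_node->next = head;
	head->prev->next = new_node;
	head->prev = new_node;
}

static inline void nvgpu_list_del(struct nvgpu_list_node *node)
{
	/* Unlink and leave the node as an empty, self-referencing list. */
	node->prev->next = node->next;
	node->next->prev = node->prev;
	nvgpu_init_list_node(node);
}

static inline bool nvgpu_list_empty(struct nvgpu_list_node *head)
{
	return head->next == head;
}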
@@ -774,7 +769,8 @@ static void nvgpu_clk_arb_run_arbiter_cb(struct work_struct *work)
 	mclk_target = 0;
 
 	nvgpu_spinlock_acquire(&arb->sessions_lock);
-	list_for_each_entry(session, &arb->sessions, link) {
+	nvgpu_list_for_each_entry(session, &arb->sessions,
+			nvgpu_clk_session, link) {
 		if (!session->zombie) {
 			mclk_set = false;
 			gpc2clk_set = false;
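
Note that nvgpu_list_for_each_entry() takes the containing struct type (nvgpu_clk_session here, nvgpu_clk_dev in later hunks) as an explicit argument, whereas the Linux list_for_each_entry() it replaces infers the type from the iterator variable via container_of(). A container_of()-style sketch of why that argument suffices to recover the entry follows; the real macro in <nvgpu/list.h> may instead resolve the container through a per-type type##_from_##member() helper, so treat this expansion as an illustration, not the actual implementation.

/*
 * Sketch of an iteration macro with an explicit container type, in the
 * spirit of nvgpu_list_for_each_entry() as used above (assumed expansion).
 */
#define nvgpu_list_entry(ptr, type, member) \
	((struct type *)((char *)(ptr) - offsetof(struct type, member)))

#define nvgpu_list_for_each_entry(pos, head, type, member)		\
	for ((pos) = nvgpu_list_entry((head)->next, type, member);	\
	     &(pos)->member != (head);					\
	     (pos) = nvgpu_list_entry((pos)->member.next, type, member))

With such an expansion, the loop above walks &arb->sessions and recovers each struct nvgpu_clk_session from its embedded link field.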
@@ -782,13 +778,13 @@ static void nvgpu_clk_arb_run_arbiter_cb(struct work_struct *work)
 				&session->target_pool[1] :
 				&session->target_pool[0]);
 			nvgpu_spinlock_acquire(&session->session_lock);
-			if (!list_empty(&session->targets)) {
+			if (!nvgpu_list_empty(&session->targets)) {
 				/* Copy over state */
 				target->mclk = session->target->mclk;
 				target->gpc2clk = session->target->gpc2clk;
 				/* Query the latest committed request */
-				list_for_each_entry_safe(dev, tmp, &session->targets,
-						node) {
+				nvgpu_list_for_each_entry_safe(dev, tmp, &session->targets,
+						nvgpu_clk_dev, node) {
 					if (!mclk_set && dev->mclk_target_mhz) {
 						target->mclk =
 							dev->mclk_target_mhz;
@@ -801,9 +797,9 @@ static void nvgpu_clk_arb_run_arbiter_cb(struct work_struct *work)
 						gpc2clk_set = true;
 					}
 					nvgpu_ref_get(&dev->refcount);
-					list_del(&dev->node);
+					nvgpu_list_del(&dev->node);
 					nvgpu_spinlock_acquire(&arb->requests_lock);
-					list_add(&dev->node, &arb->requests);
+					nvgpu_list_add(&dev->node, &arb->requests);
 					nvgpu_spinlock_release(&arb->requests_lock);
 				}
 				xchg(&session->target, target);
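
The loop above unlinks the current request from the per-session targets list and re-links it onto &arb->requests (taking the requests_lock nested inside the session_lock), so it must use the _safe iterator, which caches the next entry before the body runs. A sketch of the assumed shape, building on the hypothetical nvgpu_list_entry() above; again, the authoritative definition is in <nvgpu/list.h>.

/*
 * Sketch of the deletion-safe iterator (assumed shape).  "tmp" holds the
 * next entry up front so that nvgpu_list_del(&pos->member) inside the
 * loop body cannot break the walk.
 */
#define nvgpu_list_for_each_entry_safe(pos, tmp, head, type, member)	  \
	for ((pos) = nvgpu_list_entry((head)->next, type, member),	  \
	     (tmp) = nvgpu_list_entry((pos)->member.next, type, member); \
	     &(pos)->member != (head);					  \
	     (pos) = (tmp),						  \
	     (tmp) = nvgpu_list_entry((tmp)->member.next, type, member))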
@@ -1007,11 +1003,12 @@ exit_arb:
 	current_alarm = (u32) nvgpu_atomic64_read(&arb->alarm_mask);
 	/* notify completion for all requests */
 	nvgpu_spinlock_acquire(&arb->requests_lock);
-	list_for_each_entry_safe(dev, tmp, &arb->requests, node) {
+	nvgpu_list_for_each_entry_safe(dev, tmp, &arb->requests,
+			nvgpu_clk_dev, node) {
 		nvgpu_atomic_set(&dev->poll_mask, NVGPU_POLLIN | NVGPU_POLLRDNORM);
 		nvgpu_cond_signal_interruptible(&dev->readout_wq);
 		nvgpu_ref_put(&dev->refcount, nvgpu_clk_arb_free_fd);
-		list_del(&dev->node);
+		nvgpu_list_del(&dev->node);
 	}
 	nvgpu_spinlock_release(&arb->requests_lock);
 
@@ -1019,7 +1016,7 @@ exit_arb:
 			nvgpu_atomic_read(&arb->notification_queue.tail));
 	/* notify event for all users */
 	nvgpu_spinlock_acquire(&arb->users_lock);
-	list_for_each_entry(dev, &arb->users, link) {
+	nvgpu_list_for_each_entry(dev, &arb->users, nvgpu_clk_dev, link) {
 		alarms_notified |=
 			nvgpu_clk_arb_notify(dev, arb->actual, current_alarm);
 	}
@@ -1118,9 +1115,9 @@ int nvgpu_clk_arb_init_arbiter(struct gk20a *g)
 	if (err < 0)
 		goto init_fail;
 
-	INIT_LIST_HEAD(&arb->users);
-	INIT_LIST_HEAD(&arb->sessions);
-	INIT_LIST_HEAD(&arb->requests);
+	nvgpu_init_list_node(&arb->users);
+	nvgpu_init_list_node(&arb->sessions);
+	nvgpu_init_list_node(&arb->requests);
 
 	nvgpu_cond_init(&arb->request_wq);
 	arb->vf_table_work_queue = alloc_workqueue("%s", WQ_HIGHPRI, 1,
@@ -1244,11 +1241,11 @@ int nvgpu_clk_arb_init_session(struct gk20a *g,
 	nvgpu_smp_wmb();
 	session->target = &session->target_pool[0];
 
-	INIT_LIST_HEAD(&session->targets);
+	nvgpu_init_list_node(&session->targets);
 	nvgpu_spinlock_init(&session->session_lock);
 
 	nvgpu_spinlock_acquire(&arb->sessions_lock);
-	list_add_tail(&session->link, &arb->sessions);
+	nvgpu_list_add_tail(&session->link, &arb->sessions);
 	nvgpu_spinlock_release(&arb->sessions_lock);
 
 	*_session = session;
@@ -1277,14 +1274,15 @@ void nvgpu_clk_arb_free_session(struct nvgpu_ref *refcount)
 
 	if (arb) {
 		nvgpu_spinlock_acquire(&arb->sessions_lock);
-		list_del(&session->link);
+		nvgpu_list_del(&session->link);
 		nvgpu_spinlock_release(&arb->sessions_lock);
 	}
 
 	nvgpu_spinlock_acquire(&session->session_lock);
-	list_for_each_entry_safe(dev, tmp, &session->targets, node) {
+	nvgpu_list_for_each_entry_safe(dev, tmp, &session->targets,
+			nvgpu_clk_dev, node) {
 		nvgpu_ref_put(&dev->refcount, nvgpu_clk_arb_free_fd);
-		list_del(&dev->node);
+		nvgpu_list_del(&dev->node);
 	}
 	nvgpu_spinlock_release(&session->session_lock);
 