author     Debarshi Dutta <ddutta@nvidia.com>                    2017-10-10 01:15:54 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>   2017-10-16 16:35:04 -0400
commit     61b263d83222cd5d2ff3d2d5d699b07ebdf44288 (patch)
tree       07ef35fb427dc2f93741ed9193673b6da2872bb7 /drivers/gpu/nvgpu
parent     30b9cbe35a2a0adc4e3a65b033dc0f61046783ea (diff)
gpu: nvgpu: replace wait_queue_head_t with nvgpu_cond
Replace existing usages of wait_queue_head_t with struct nvgpu_cond and use
the corresponding APIs in order to reduce Linux dependencies in NVGPU.

JIRA NVGPU-205

Change-Id: I85850369c3c47d3e1704e4171b1d172361842423
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1575778
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
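The conversion pattern is mechanical and the same at every call site. Below is a minimal sketch, assuming a hypothetical driver struct (my_dev and its fields are illustrative only); the nvgpu_cond calls themselves are the ones this patch introduces, with 0 passed as the extra trailing timeout argument of the wait macros:

	#include <nvgpu/cond.h>

	struct my_dev {
		struct nvgpu_cond readout_wq;	/* was: wait_queue_head_t readout_wq; */
		bool data_ready;
	};

	static void my_dev_setup(struct my_dev *dev)
	{
		/* was: init_waitqueue_head(&dev->readout_wq); */
		nvgpu_cond_init(&dev->readout_wq);
	}

	static int my_dev_wait(struct my_dev *dev)
	{
		/*
		 * was: wait_event_interruptible(dev->readout_wq, dev->data_ready);
		 * the trailing 0 is the timeout argument this patch passes everywhere.
		 */
		return NVGPU_COND_WAIT_INTERRUPTIBLE(&dev->readout_wq,
				dev->data_ready, 0);
	}

	static void my_dev_post(struct my_dev *dev)
	{
		dev->data_ready = true;
		/*
		 * was: wake_up_interruptible(&dev->readout_wq);
		 * nvgpu_cond_broadcast_interruptible() replaces
		 * wake_up_interruptible_all(), nvgpu_cond_broadcast()
		 * replaces wake_up_all().
		 */
		nvgpu_cond_signal_interruptible(&dev->readout_wq);
	}

Poll handlers keep calling poll_wait(), but on the waitqueue now embedded in the condition variable: poll_wait(filp, &dev->readout_wq.wq, wait).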
Diffstat (limited to 'drivers/gpu/nvgpu')
-rw-r--r--  drivers/gpu/nvgpu/clk/clk_arb.c                   25
-rw-r--r--  drivers/gpu/nvgpu/common/linux/driver_common.c    12
-rw-r--r--  drivers/gpu/nvgpu/common/linux/intr.c              4
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_channel.c     6
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_tsg.c         4
-rw-r--r--  drivers/gpu/nvgpu/common/linux/os_linux.h          4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.h            2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c       12
-rw-r--r--  drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c            6
-rw-r--r--  drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.h            2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/sched_gk20a.c             10
-rw-r--r--  drivers/gpu/nvgpu/gk20a/sched_gk20a.h              2
-rw-r--r--  drivers/gpu/nvgpu/pstate/pstate.c                  2
-rw-r--r--  drivers/gpu/nvgpu/pstate/pstate.h                  2
14 files changed, 47 insertions, 46 deletions
diff --git a/drivers/gpu/nvgpu/clk/clk_arb.c b/drivers/gpu/nvgpu/clk/clk_arb.c
index a914b4a2..937d47db 100644
--- a/drivers/gpu/nvgpu/clk/clk_arb.c
+++ b/drivers/gpu/nvgpu/clk/clk_arb.c
@@ -40,6 +40,7 @@
 #include <nvgpu/kref.h>
 #include <nvgpu/log.h>
 #include <nvgpu/barrier.h>
+#include <nvgpu/cond.h>
 
 #include "gk20a/gk20a.h"
 #include "clk/clk_arb.h"
@@ -189,7 +190,7 @@ struct nvgpu_clk_arb {
 	struct work_struct vf_table_fn_work;
 	struct workqueue_struct *vf_table_work_queue;
 
-	wait_queue_head_t request_wq;
+	struct nvgpu_cond request_wq;
 
 	struct nvgpu_clk_vf_table *current_vf_table;
 	struct nvgpu_clk_vf_table vf_table_pool[2];
@@ -218,7 +219,7 @@ struct nvgpu_clk_dev {
 		struct list_head link;
 		struct llist_node node;
 	};
-	wait_queue_head_t readout_wq;
+	struct nvgpu_cond readout_wq;
 	nvgpu_atomic_t poll_mask;
 	u16 gpc2clk_target_mhz;
 	u16 mclk_target_mhz;
@@ -371,7 +372,7 @@ int nvgpu_clk_arb_init_arbiter(struct gk20a *g)
 	INIT_LIST_HEAD_RCU(&arb->sessions);
 	init_llist_head(&arb->requests);
 
-	init_waitqueue_head(&arb->request_wq);
+	nvgpu_cond_init(&arb->request_wq);
 	arb->vf_table_work_queue = alloc_workqueue("%s", WQ_HIGHPRI, 1,
 			"vf_table_update");
 	arb->update_work_queue = alloc_workqueue("%s", WQ_HIGHPRI, 1,
@@ -400,8 +401,8 @@ int nvgpu_clk_arb_init_arbiter(struct gk20a *g)
 	do {
 		/* Check that first run is completed */
 		nvgpu_smp_mb();
-		wait_event_interruptible(arb->request_wq,
-			nvgpu_atomic_read(&arb->req_nr));
+		NVGPU_COND_WAIT_INTERRUPTIBLE(&arb->request_wq,
+			nvgpu_atomic_read(&arb->req_nr), 0);
 	} while (!nvgpu_atomic_read(&arb->req_nr));
 
 
@@ -547,7 +548,7 @@ static int nvgpu_clk_arb_install_fd(struct gk20a *g,
 
 	fd_install(fd, file);
 
-	init_waitqueue_head(&dev->readout_wq);
+	nvgpu_cond_init(&dev->readout_wq);
 
 	nvgpu_atomic_set(&dev->poll_mask, 0);
 
@@ -1269,7 +1270,7 @@ static void nvgpu_clk_arb_run_arbiter_cb(struct work_struct *work)
 	/* VF Update complete */
 	nvgpu_clk_arb_set_global_alarm(g, EVENT(VF_UPDATE));
 
-	wake_up_interruptible(&arb->request_wq);
+	nvgpu_cond_signal_interruptible(&arb->request_wq);
 
 #ifdef CONFIG_DEBUG_FS
 	g->ops.bus.read_ptimer(g, &t1);
@@ -1317,7 +1318,7 @@ exit_arb:
 	head = llist_del_all(&arb->requests);
 	llist_for_each_entry_safe(dev, tmp, head, node) {
 		nvgpu_atomic_set(&dev->poll_mask, POLLIN | POLLRDNORM);
-		wake_up_interruptible(&dev->readout_wq);
+		nvgpu_cond_signal_interruptible(&dev->readout_wq);
 		nvgpu_ref_put(&dev->refcount, nvgpu_clk_arb_free_fd);
 	}
 
@@ -1444,7 +1445,7 @@ static u32 nvgpu_clk_arb_notify(struct nvgpu_clk_dev *dev,
 
 	if (poll_mask) {
 		nvgpu_atomic_set(&dev->poll_mask, poll_mask);
-		wake_up_interruptible_all(&dev->readout_wq);
+		nvgpu_cond_broadcast_interruptible(&dev->readout_wq);
 	}
 
 	return new_alarms_reported;
@@ -1587,8 +1588,8 @@ static ssize_t nvgpu_clk_arb_read_event_dev(struct file *filp, char __user *buf,
 	while (!__pending_event(dev, &info)) {
 		if (filp->f_flags & O_NONBLOCK)
 			return -EAGAIN;
-		err = wait_event_interruptible(dev->readout_wq,
-				__pending_event(dev, &info));
+		err = NVGPU_COND_WAIT_INTERRUPTIBLE(&dev->readout_wq,
+				__pending_event(dev, &info), 0);
 		if (err)
 			return err;
 		if (info.timestamp)
@@ -1607,7 +1608,7 @@ static unsigned int nvgpu_clk_arb_poll_dev(struct file *filp, poll_table *wait)
 
 	gk20a_dbg_fn("");
 
-	poll_wait(filp, &dev->readout_wq, wait);
+	poll_wait(filp, &dev->readout_wq.wq, wait);
 	return nvgpu_atomic_xchg(&dev->poll_mask, 0);
 }
 
diff --git a/drivers/gpu/nvgpu/common/linux/driver_common.c b/drivers/gpu/nvgpu/common/linux/driver_common.c
index 295297b6..e4a65692 100644
--- a/drivers/gpu/nvgpu/common/linux/driver_common.c
+++ b/drivers/gpu/nvgpu/common/linux/driver_common.c
@@ -39,8 +39,8 @@ static void nvgpu_init_vars(struct gk20a *g)
 	struct device *dev = dev_from_gk20a(g);
 	struct gk20a_platform *platform = dev_get_drvdata(dev);
 
-	init_waitqueue_head(&l->sw_irq_stall_last_handled_wq);
-	init_waitqueue_head(&l->sw_irq_nonstall_last_handled_wq);
+	nvgpu_cond_init(&l->sw_irq_stall_last_handled_wq);
+	nvgpu_cond_init(&l->sw_irq_nonstall_last_handled_wq);
 	gk20a_init_gr(g);
 
 	init_rwsem(&l->busy_lock);
@@ -261,14 +261,14 @@ void nvgpu_wait_for_deferred_interrupts(struct gk20a *g)
 	int nonstall_irq_threshold = atomic_read(&l->hw_irq_nonstall_count);
 
 	/* wait until all stalling irqs are handled */
-	wait_event(l->sw_irq_stall_last_handled_wq,
+	NVGPU_COND_WAIT(&l->sw_irq_stall_last_handled_wq,
 		   cyclic_delta(stall_irq_threshold,
 			atomic_read(&l->sw_irq_stall_last_handled))
-		   <= 0);
+		   <= 0, 0);
 
 	/* wait until all non-stalling irqs are handled */
-	wait_event(l->sw_irq_nonstall_last_handled_wq,
+	NVGPU_COND_WAIT(&l->sw_irq_nonstall_last_handled_wq,
 		   cyclic_delta(nonstall_irq_threshold,
 			atomic_read(&l->sw_irq_nonstall_last_handled))
-		   <= 0);
+		   <= 0, 0);
 }
diff --git a/drivers/gpu/nvgpu/common/linux/intr.c b/drivers/gpu/nvgpu/common/linux/intr.c
index da177b55..d1b6ef36 100644
--- a/drivers/gpu/nvgpu/common/linux/intr.c
+++ b/drivers/gpu/nvgpu/common/linux/intr.c
@@ -59,7 +59,7 @@ irqreturn_t nvgpu_intr_thread_stall(struct gk20a *g)
 	/* sync handled irq counter before re-enabling interrupts */
 	atomic_set(&l->sw_irq_stall_last_handled, hw_irq_count);
 
-	wake_up_all(&l->sw_irq_stall_last_handled_wq);
+	nvgpu_cond_broadcast(&l->sw_irq_stall_last_handled_wq);
 
 	trace_mc_gk20a_intr_thread_stall_done(g->name);
 
@@ -128,7 +128,7 @@ irqreturn_t nvgpu_intr_nonstall(struct gk20a *g)
 
 	g->ops.mc.intr_nonstall_resume(g);
 
-	wake_up_all(&l->sw_irq_nonstall_last_handled_wq);
+	nvgpu_cond_broadcast(&l->sw_irq_nonstall_last_handled_wq);
 
 	return IRQ_HANDLED;
 }
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
index 6c66eca0..3ea07eed 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
@@ -575,7 +575,7 @@ static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait)
 
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_info, "");
 
-	poll_wait(filep, &event_id_data->event_id_wq, wait);
+	poll_wait(filep, &event_id_data->event_id_wq.wq, wait);
 
 	nvgpu_mutex_acquire(&event_id_data->lock);
 
@@ -683,7 +683,7 @@ void gk20a_channel_event_id_post_event(struct channel_gk20a *ch,
 			event_id, ch->chid);
 	event_id_data->event_posted = true;
 
-	wake_up_interruptible_all(&event_id_data->event_id_wq);
+	nvgpu_cond_broadcast_interruptible(&event_id_data->event_id_wq);
 
 	nvgpu_mutex_release(&event_id_data->lock);
 }
@@ -735,7 +735,7 @@ static int gk20a_channel_event_id_enable(struct channel_gk20a *ch,
 	event_id_data->is_tsg = false;
 	event_id_data->event_id = event_id;
 
-	init_waitqueue_head(&event_id_data->event_id_wq);
+	nvgpu_cond_init(&event_id_data->event_id_wq);
 	err = nvgpu_mutex_init(&event_id_data->lock);
 	if (err)
 		goto clean_up_free;
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c b/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c
index 10379633..6d0439f3 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c
@@ -97,7 +97,7 @@ void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
 			event_id, tsg->tsgid);
 	event_id_data->event_posted = true;
 
-	wake_up_interruptible_all(&event_id_data->event_id_wq);
+	nvgpu_cond_broadcast_interruptible(&event_id_data->event_id_wq);
 
 	nvgpu_mutex_release(&event_id_data->lock);
 }
@@ -150,7 +150,7 @@ static int gk20a_tsg_event_id_enable(struct tsg_gk20a *tsg,
 	event_id_data->is_tsg = true;
 	event_id_data->event_id = event_id;
 
-	init_waitqueue_head(&event_id_data->event_id_wq);
+	nvgpu_cond_init(&event_id_data->event_id_wq);
 	err = nvgpu_mutex_init(&event_id_data->lock);
 	if (err)
 		goto clean_up_free;
diff --git a/drivers/gpu/nvgpu/common/linux/os_linux.h b/drivers/gpu/nvgpu/common/linux/os_linux.h
index 4a3128c3..c67cbbcc 100644
--- a/drivers/gpu/nvgpu/common/linux/os_linux.h
+++ b/drivers/gpu/nvgpu/common/linux/os_linux.h
@@ -77,12 +77,12 @@ struct nvgpu_os_linux {
 	atomic_t hw_irq_stall_count;
 	atomic_t hw_irq_nonstall_count;
 
-	wait_queue_head_t sw_irq_stall_last_handled_wq;
+	struct nvgpu_cond sw_irq_stall_last_handled_wq;
 	atomic_t sw_irq_stall_last_handled;
 
 	atomic_t nonstall_ops;
 
-	wait_queue_head_t sw_irq_nonstall_last_handled_wq;
+	struct nvgpu_cond sw_irq_nonstall_last_handled_wq;
 	atomic_t sw_irq_nonstall_last_handled;
 
 	struct work_struct nonstall_fn_work;
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
index 9c0d22d2..4b1cb351 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
@@ -126,7 +126,7 @@ struct gk20a_event_id_data {
 
 	bool event_posted;
 
-	wait_queue_head_t event_id_wq;
+	struct nvgpu_cond event_id_wq;
 	struct nvgpu_mutex lock;
 	struct nvgpu_list_node event_id_node;
 };
diff --git a/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c b/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c
index 9977c5a1..fb33de23 100644
--- a/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c
@@ -53,7 +53,7 @@ struct gk20a_ctxsw_dev {
 	struct nvgpu_ctxsw_trace_entry *ents;
 	struct nvgpu_ctxsw_trace_filter filter;
 	bool write_enabled;
-	wait_queue_head_t readout_wq;
+	struct nvgpu_cond readout_wq;
 	size_t size;
 	u32 num_ents;
 
@@ -100,8 +100,8 @@ ssize_t gk20a_ctxsw_dev_read(struct file *filp, char __user *buf, size_t size,
 		nvgpu_mutex_release(&dev->write_lock);
 		if (filp->f_flags & O_NONBLOCK)
 			return -EAGAIN;
-		err = wait_event_interruptible(dev->readout_wq,
-			!ring_is_empty(hdr));
+		err = NVGPU_COND_WAIT_INTERRUPTIBLE(&dev->readout_wq,
+			!ring_is_empty(hdr), 0);
 		if (err)
 			return err;
 		nvgpu_mutex_acquire(&dev->write_lock);
@@ -436,7 +436,7 @@ unsigned int gk20a_ctxsw_dev_poll(struct file *filp, poll_table *wait)
 	gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "");
 
 	nvgpu_mutex_acquire(&dev->write_lock);
-	poll_wait(filp, &dev->readout_wq, wait);
+	poll_wait(filp, &dev->readout_wq.wq, wait);
 	if (!ring_is_empty(hdr))
 		mask |= POLLIN | POLLRDNORM;
 	nvgpu_mutex_release(&dev->write_lock);
@@ -503,7 +503,7 @@ static int gk20a_ctxsw_init_devs(struct gk20a *g)
 		dev->g = g;
 		dev->hdr = NULL;
 		dev->write_enabled = false;
-		init_waitqueue_head(&dev->readout_wq);
+		nvgpu_cond_init(&dev->readout_wq);
 		err = nvgpu_mutex_init(&dev->write_lock);
 		if (err)
 			return err;
@@ -683,7 +683,7 @@ void gk20a_ctxsw_trace_wake_up(struct gk20a *g, int vmid)
 		return;
 
 	dev = &g->ctxsw_trace->devs[vmid];
-	wake_up_interruptible(&dev->readout_wq);
+	nvgpu_cond_signal_interruptible(&dev->readout_wq);
 }
 
 void gk20a_ctxsw_trace_channel_reset(struct gk20a *g, struct channel_gk20a *ch)
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
index c2885807..135cb1e9 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
@@ -158,7 +158,7 @@ static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
 	dbg_session->is_pg_disabled = false;
 	dbg_session->is_timeout_disabled = false;
 
-	init_waitqueue_head(&dbg_session->dbg_events.wait_queue);
+	nvgpu_cond_init(&dbg_session->dbg_events.wait_queue);
 	nvgpu_init_list_node(&dbg_session->ch_list);
 	err = nvgpu_mutex_init(&dbg_session->ch_list_lock);
 	if (err)
@@ -286,7 +286,7 @@ unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait)
 
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
 
-	poll_wait(filep, &dbg_s->dbg_events.wait_queue, wait);
+	poll_wait(filep, &dbg_s->dbg_events.wait_queue.wq, wait);
 
 	gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
 
@@ -337,7 +337,7 @@ void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch)
 
 			dbg_s->dbg_events.num_pending_events++;
 
-			wake_up_interruptible_all(&dbg_s->dbg_events.wait_queue);
+			nvgpu_cond_broadcast_interruptible(&dbg_s->dbg_events.wait_queue);
 		}
 	}
 
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.h b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.h
index 062674fb..1a6de3a8 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.h
@@ -41,7 +41,7 @@ struct channel_gk20a *
 nvgpu_dbg_gpu_get_session_channel(struct dbg_session_gk20a *dbg_s);
 
 struct dbg_gpu_session_events {
-	wait_queue_head_t wait_queue;
+	struct nvgpu_cond wait_queue;
 	bool events_enabled;
 	int num_pending_events;
 };
diff --git a/drivers/gpu/nvgpu/gk20a/sched_gk20a.c b/drivers/gpu/nvgpu/gk20a/sched_gk20a.c
index d635b43b..a77536af 100644
--- a/drivers/gpu/nvgpu/gk20a/sched_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/sched_gk20a.c
@@ -58,8 +58,8 @@ ssize_t gk20a_sched_dev_read(struct file *filp, char __user *buf,
 		nvgpu_mutex_release(&sched->status_lock);
 		if (filp->f_flags & O_NONBLOCK)
 			return -EAGAIN;
-		err = wait_event_interruptible(sched->readout_wq,
-			sched->status);
+		err = NVGPU_COND_WAIT_INTERRUPTIBLE(&sched->readout_wq,
+			sched->status, 0);
 		if (err)
 			return err;
 		nvgpu_mutex_acquire(&sched->status_lock);
@@ -88,7 +88,7 @@ unsigned int gk20a_sched_dev_poll(struct file *filp, poll_table *wait)
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "");
 
 	nvgpu_mutex_acquire(&sched->status_lock);
-	poll_wait(filp, &sched->readout_wq, wait);
+	poll_wait(filp, &sched->readout_wq.wq, wait);
 	if (sched->status)
 		mask |= POLLIN | POLLRDNORM;
 	nvgpu_mutex_release(&sched->status_lock);
@@ -552,7 +552,7 @@ void gk20a_sched_ctrl_tsg_added(struct gk20a *g, struct tsg_gk20a *tsg)
 	NVGPU_SCHED_SET(tsg->tsgid, sched->recent_tsg_bitmap);
 	sched->status |= NVGPU_SCHED_STATUS_TSG_OPEN;
 	nvgpu_mutex_release(&sched->status_lock);
-	wake_up_interruptible(&sched->readout_wq);
+	nvgpu_cond_signal_interruptible(&sched->readout_wq);
 }
 
 void gk20a_sched_ctrl_tsg_removed(struct gk20a *g, struct tsg_gk20a *tsg)
@@ -609,7 +609,7 @@ int gk20a_sched_ctrl_init(struct gk20a *g)
 		goto free_recent;
 	}
 
-	init_waitqueue_head(&sched->readout_wq);
+	nvgpu_cond_init(&sched->readout_wq);
 
 	err = nvgpu_mutex_init(&sched->status_lock);
 	if (err)
diff --git a/drivers/gpu/nvgpu/gk20a/sched_gk20a.h b/drivers/gpu/nvgpu/gk20a/sched_gk20a.h
index a54553c1..0cdb9914 100644
--- a/drivers/gpu/nvgpu/gk20a/sched_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/sched_gk20a.h
@@ -44,7 +44,7 @@ struct gk20a_sched_ctrl {
 	u64 *recent_tsg_bitmap;
 	u64 *ref_tsg_bitmap;
 
-	wait_queue_head_t readout_wq;
+	struct nvgpu_cond readout_wq;
 };
 
 int gk20a_sched_dev_release(struct inode *inode, struct file *filp);
diff --git a/drivers/gpu/nvgpu/pstate/pstate.c b/drivers/gpu/nvgpu/pstate/pstate.c
index b9f82c72..512653ba 100644
--- a/drivers/gpu/nvgpu/pstate/pstate.c
+++ b/drivers/gpu/nvgpu/pstate/pstate.c
@@ -357,7 +357,7 @@ static int pstate_sw_setup(struct gk20a *g)
 
 	gk20a_dbg_fn("");
 
-	init_waitqueue_head(&g->perf_pmu.pstatesobjs.pstate_notifier_wq);
+	nvgpu_cond_init(&g->perf_pmu.pstatesobjs.pstate_notifier_wq);
 
 	err = nvgpu_mutex_init(&g->perf_pmu.pstatesobjs.pstate_mutex);
 	if (err)
diff --git a/drivers/gpu/nvgpu/pstate/pstate.h b/drivers/gpu/nvgpu/pstate/pstate.h
index 17dbd476..55726d55 100644
--- a/drivers/gpu/nvgpu/pstate/pstate.h
+++ b/drivers/gpu/nvgpu/pstate/pstate.h
@@ -58,7 +58,7 @@ struct pstate {
 struct pstates {
 	struct boardobjgrp_e32 super;
 	u32 num_levels;
-	wait_queue_head_t pstate_notifier_wq;
+	struct nvgpu_cond pstate_notifier_wq;
 	u32 is_pstate_switch_on;
 	struct nvgpu_mutex pstate_mutex; /* protect is_pstate_switch_on */
 };