summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
diff options
context:
space:
mode:
authorDeepak Nibade <dnibade@nvidia.com>2017-04-11 08:24:27 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2017-04-12 18:34:25 -0400
commita6adaaab7a90d86a71943f9977920ad7f726cea8 (patch)
tree79dcfa4f268fb5b5784996aa3b91f86e8a6ef640 /drivers/gpu/nvgpu/gk20a/channel_gk20a.c
parentf474a9e0e41d62f6a804c6501068da621556013e (diff)
gpu: nvgpu: use nvgpu list for channel worker item
Use nvgpu list APIs instead of linux list APIs to store channel worker items Jira NVGPU-13 Change-Id: I01d214810ca2495bd0a644dd1a2816ab8e526981 Signed-off-by: Deepak Nibade <dnibade@nvidia.com> Reviewed-on: http://git-master/r/1460575 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_gk20a.c')
-rw-r--r--drivers/gpu/nvgpu/gk20a/channel_gk20a.c19
1 file changed, 10 insertions, 9 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index b729e0f8..f5ba9e50 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -1736,7 +1736,7 @@ static void gk20a_channel_worker_process(struct gk20a *g, int *get)
1736 gk20a_dbg_fn(""); 1736 gk20a_dbg_fn("");
1737 1737
1738 while (__gk20a_channel_worker_pending(g, *get)) { 1738 while (__gk20a_channel_worker_pending(g, *get)) {
1739 struct channel_gk20a *ch; 1739 struct channel_gk20a *ch = NULL;
1740 1740
1741 /* 1741 /*
1742 * If a channel is on the list, it's guaranteed to be handled 1742 * If a channel is on the list, it's guaranteed to be handled
@@ -1751,11 +1751,12 @@ static void gk20a_channel_worker_process(struct gk20a *g, int *get)
1751 * enqueue are harmless. 1751 * enqueue are harmless.
1752 */ 1752 */
1753 nvgpu_spinlock_acquire(&g->channel_worker.items_lock); 1753 nvgpu_spinlock_acquire(&g->channel_worker.items_lock);
1754 ch = list_first_entry_or_null(&g->channel_worker.items, 1754 if (!nvgpu_list_empty(&g->channel_worker.items)) {
1755 struct channel_gk20a, 1755 ch = nvgpu_list_first_entry(&g->channel_worker.items,
1756 channel_gk20a,
1756 worker_item); 1757 worker_item);
1757 if (ch) 1758 nvgpu_list_del(&ch->worker_item);
1758 list_del_init(&ch->worker_item); 1759 }
1759 nvgpu_spinlock_release(&g->channel_worker.items_lock); 1760 nvgpu_spinlock_release(&g->channel_worker.items_lock);
1760 1761
1761 if (!ch) { 1762 if (!ch) {
@@ -1818,7 +1819,7 @@ int nvgpu_channel_worker_init(struct gk20a *g)
1818 1819
1819 atomic_set(&g->channel_worker.put, 0); 1820 atomic_set(&g->channel_worker.put, 0);
1820 init_waitqueue_head(&g->channel_worker.wq); 1821 init_waitqueue_head(&g->channel_worker.wq);
1821 INIT_LIST_HEAD(&g->channel_worker.items); 1822 nvgpu_init_list_node(&g->channel_worker.items);
1822 nvgpu_spinlock_init(&g->channel_worker.items_lock); 1823 nvgpu_spinlock_init(&g->channel_worker.items_lock);
1823 task = kthread_run(gk20a_channel_poll_worker, g, 1824 task = kthread_run(gk20a_channel_poll_worker, g,
1824 "nvgpu_channel_poll_%s", g->name); 1825 "nvgpu_channel_poll_%s", g->name);
@@ -1861,7 +1862,7 @@ void gk20a_channel_worker_enqueue(struct channel_gk20a *ch)
1861 } 1862 }
1862 1863
1863 nvgpu_spinlock_acquire(&g->channel_worker.items_lock); 1864 nvgpu_spinlock_acquire(&g->channel_worker.items_lock);
1864 if (!list_empty(&ch->worker_item)) { 1865 if (!nvgpu_list_empty(&ch->worker_item)) {
1865 /* 1866 /*
1866 * Already queued, so will get processed eventually. 1867 * Already queued, so will get processed eventually.
1867 * The worker is probably awake already. 1868 * The worker is probably awake already.
@@ -1870,7 +1871,7 @@ void gk20a_channel_worker_enqueue(struct channel_gk20a *ch)
1870 gk20a_channel_put(ch); 1871 gk20a_channel_put(ch);
1871 return; 1872 return;
1872 } 1873 }
1873 list_add_tail(&ch->worker_item, &g->channel_worker.items); 1874 nvgpu_list_add_tail(&ch->worker_item, &g->channel_worker.items);
1874 nvgpu_spinlock_release(&g->channel_worker.items_lock); 1875 nvgpu_spinlock_release(&g->channel_worker.items_lock);
1875 1876
1876 __gk20a_channel_worker_wakeup(g); 1877 __gk20a_channel_worker_wakeup(g);
@@ -2646,7 +2647,7 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
2646 INIT_LIST_HEAD(&c->joblist.dynamic.jobs); 2647 INIT_LIST_HEAD(&c->joblist.dynamic.jobs);
2647 nvgpu_init_list_node(&c->dbg_s_list); 2648 nvgpu_init_list_node(&c->dbg_s_list);
2648 nvgpu_init_list_node(&c->event_id_list); 2649 nvgpu_init_list_node(&c->event_id_list);
2649 INIT_LIST_HEAD(&c->worker_item); 2650 nvgpu_init_list_node(&c->worker_item);
2650 2651
2651 err = nvgpu_mutex_init(&c->ioctl_lock); 2652 err = nvgpu_mutex_init(&c->ioctl_lock);
2652 if (err) 2653 if (err)