diff options
author | Deepak Nibade <dnibade@nvidia.com> | 2017-04-11 08:24:27 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-04-12 18:34:25 -0400 |
commit | a6adaaab7a90d86a71943f9977920ad7f726cea8 (patch) | |
tree | 79dcfa4f268fb5b5784996aa3b91f86e8a6ef640 | |
parent | f474a9e0e41d62f6a804c6501068da621556013e (diff) |
gpu: nvgpu: use nvgpu list for channel worker item
Use nvgpu list APIs instead of Linux list APIs
to store channel worker items
Jira NVGPU-13
Change-Id: I01d214810ca2495bd0a644dd1a2816ab8e526981
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1460575
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/channel_gk20a.c | 19 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/channel_gk20a.h | 9 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/gk20a.h | 2 |
3 files changed, 19 insertions, 11 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c index b729e0f8..f5ba9e50 100644 --- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c | |||
@@ -1736,7 +1736,7 @@ static void gk20a_channel_worker_process(struct gk20a *g, int *get) | |||
1736 | gk20a_dbg_fn(""); | 1736 | gk20a_dbg_fn(""); |
1737 | 1737 | ||
1738 | while (__gk20a_channel_worker_pending(g, *get)) { | 1738 | while (__gk20a_channel_worker_pending(g, *get)) { |
1739 | struct channel_gk20a *ch; | 1739 | struct channel_gk20a *ch = NULL; |
1740 | 1740 | ||
1741 | /* | 1741 | /* |
1742 | * If a channel is on the list, it's guaranteed to be handled | 1742 | * If a channel is on the list, it's guaranteed to be handled |
@@ -1751,11 +1751,12 @@ static void gk20a_channel_worker_process(struct gk20a *g, int *get) | |||
1751 | * enqueue are harmless. | 1751 | * enqueue are harmless. |
1752 | */ | 1752 | */ |
1753 | nvgpu_spinlock_acquire(&g->channel_worker.items_lock); | 1753 | nvgpu_spinlock_acquire(&g->channel_worker.items_lock); |
1754 | ch = list_first_entry_or_null(&g->channel_worker.items, | 1754 | if (!nvgpu_list_empty(&g->channel_worker.items)) { |
1755 | struct channel_gk20a, | 1755 | ch = nvgpu_list_first_entry(&g->channel_worker.items, |
1756 | channel_gk20a, | ||
1756 | worker_item); | 1757 | worker_item); |
1757 | if (ch) | 1758 | nvgpu_list_del(&ch->worker_item); |
1758 | list_del_init(&ch->worker_item); | 1759 | } |
1759 | nvgpu_spinlock_release(&g->channel_worker.items_lock); | 1760 | nvgpu_spinlock_release(&g->channel_worker.items_lock); |
1760 | 1761 | ||
1761 | if (!ch) { | 1762 | if (!ch) { |
@@ -1818,7 +1819,7 @@ int nvgpu_channel_worker_init(struct gk20a *g) | |||
1818 | 1819 | ||
1819 | atomic_set(&g->channel_worker.put, 0); | 1820 | atomic_set(&g->channel_worker.put, 0); |
1820 | init_waitqueue_head(&g->channel_worker.wq); | 1821 | init_waitqueue_head(&g->channel_worker.wq); |
1821 | INIT_LIST_HEAD(&g->channel_worker.items); | 1822 | nvgpu_init_list_node(&g->channel_worker.items); |
1822 | nvgpu_spinlock_init(&g->channel_worker.items_lock); | 1823 | nvgpu_spinlock_init(&g->channel_worker.items_lock); |
1823 | task = kthread_run(gk20a_channel_poll_worker, g, | 1824 | task = kthread_run(gk20a_channel_poll_worker, g, |
1824 | "nvgpu_channel_poll_%s", g->name); | 1825 | "nvgpu_channel_poll_%s", g->name); |
@@ -1861,7 +1862,7 @@ void gk20a_channel_worker_enqueue(struct channel_gk20a *ch) | |||
1861 | } | 1862 | } |
1862 | 1863 | ||
1863 | nvgpu_spinlock_acquire(&g->channel_worker.items_lock); | 1864 | nvgpu_spinlock_acquire(&g->channel_worker.items_lock); |
1864 | if (!list_empty(&ch->worker_item)) { | 1865 | if (!nvgpu_list_empty(&ch->worker_item)) { |
1865 | /* | 1866 | /* |
1866 | * Already queued, so will get processed eventually. | 1867 | * Already queued, so will get processed eventually. |
1867 | * The worker is probably awake already. | 1868 | * The worker is probably awake already. |
@@ -1870,7 +1871,7 @@ void gk20a_channel_worker_enqueue(struct channel_gk20a *ch) | |||
1870 | gk20a_channel_put(ch); | 1871 | gk20a_channel_put(ch); |
1871 | return; | 1872 | return; |
1872 | } | 1873 | } |
1873 | list_add_tail(&ch->worker_item, &g->channel_worker.items); | 1874 | nvgpu_list_add_tail(&ch->worker_item, &g->channel_worker.items); |
1874 | nvgpu_spinlock_release(&g->channel_worker.items_lock); | 1875 | nvgpu_spinlock_release(&g->channel_worker.items_lock); |
1875 | 1876 | ||
1876 | __gk20a_channel_worker_wakeup(g); | 1877 | __gk20a_channel_worker_wakeup(g); |
@@ -2646,7 +2647,7 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid) | |||
2646 | INIT_LIST_HEAD(&c->joblist.dynamic.jobs); | 2647 | INIT_LIST_HEAD(&c->joblist.dynamic.jobs); |
2647 | nvgpu_init_list_node(&c->dbg_s_list); | 2648 | nvgpu_init_list_node(&c->dbg_s_list); |
2648 | nvgpu_init_list_node(&c->event_id_list); | 2649 | nvgpu_init_list_node(&c->event_id_list); |
2649 | INIT_LIST_HEAD(&c->worker_item); | 2650 | nvgpu_init_list_node(&c->worker_item); |
2650 | 2651 | ||
2651 | err = nvgpu_mutex_init(&c->ioctl_lock); | 2652 | err = nvgpu_mutex_init(&c->ioctl_lock); |
2652 | if (err) | 2653 | if (err) |
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h index 0514c5b8..d52c1b6f 100644 --- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h | |||
@@ -217,7 +217,7 @@ struct channel_gk20a { | |||
217 | 217 | ||
218 | struct channel_gk20a_timeout timeout; | 218 | struct channel_gk20a_timeout timeout; |
219 | /* for job cleanup handling in the background worker */ | 219 | /* for job cleanup handling in the background worker */ |
220 | struct list_head worker_item; | 220 | struct nvgpu_list_node worker_item; |
221 | 221 | ||
222 | #if defined(CONFIG_GK20A_CYCLE_STATS) | 222 | #if defined(CONFIG_GK20A_CYCLE_STATS) |
223 | struct { | 223 | struct { |
@@ -284,6 +284,13 @@ channel_gk20a_from_ch_entry(struct nvgpu_list_node *node) | |||
284 | ((uintptr_t)node - offsetof(struct channel_gk20a, ch_entry)); | 284 | ((uintptr_t)node - offsetof(struct channel_gk20a, ch_entry)); |
285 | }; | 285 | }; |
286 | 286 | ||
287 | static inline struct channel_gk20a * | ||
288 | channel_gk20a_from_worker_item(struct nvgpu_list_node *node) | ||
289 | { | ||
290 | return (struct channel_gk20a *) | ||
291 | ((uintptr_t)node - offsetof(struct channel_gk20a, worker_item)); | ||
292 | }; | ||
293 | |||
287 | static inline bool gk20a_channel_as_bound(struct channel_gk20a *ch) | 294 | static inline bool gk20a_channel_as_bound(struct channel_gk20a *ch) |
288 | { | 295 | { |
289 | return !!ch->vm; | 296 | return !!ch->vm; |
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h index 7d7d573a..9760ba35 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/gk20a.h | |||
@@ -1107,7 +1107,7 @@ struct gk20a { | |||
1107 | struct task_struct *poll_task; | 1107 | struct task_struct *poll_task; |
1108 | atomic_t put; | 1108 | atomic_t put; |
1109 | wait_queue_head_t wq; | 1109 | wait_queue_head_t wq; |
1110 | struct list_head items; | 1110 | struct nvgpu_list_node items; |
1111 | struct nvgpu_spinlock items_lock; | 1111 | struct nvgpu_spinlock items_lock; |
1112 | } channel_worker; | 1112 | } channel_worker; |
1113 | 1113 | ||