path: root/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
author		Terje Bergstrom <tbergstrom@nvidia.com>	2017-03-30 10:44:03 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-04-10 22:04:19 -0400
commit		3ba374a5d94f8c2067731155afaf79f03e6c390c (patch)
tree		d8a2bd0d52b1e8862510aedeb7529944c0b7e28e /drivers/gpu/nvgpu/gk20a/channel_gk20a.c
parent		2be51206af88aba6662cdd9de5bd6c18989bbcbd (diff)
gpu: nvgpu: gk20a: Use new error macro
gk20a_err() and gk20a_warn() require a struct device pointer, which is
not portable across operating systems. The new nvgpu_err() and
nvgpu_warn() macros take a struct gk20a pointer instead. Convert the
code to use the more portable macros.

JIRA NVGPU-16

Change-Id: Ia51f36d94c5ce57a5a0ab83b3c83a6bce09e2d5c
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1331694
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
GVS: Gerrit_Virtual_Submit
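The conversion at each call site follows a single pattern; for example
(taken from gk20a_wait_channel_idle() in the diff below):

	/* before: Linux-specific, needs a struct device via dev_from_gk20a() */
	gk20a_err(dev_from_gk20a(ch->g), "jobs not freed for channel %d\n",
			ch->hw_chid);

	/* after: portable, takes the struct gk20a pointer directly */
	nvgpu_err(ch->g, "jobs not freed for channel %d\n",
			ch->hw_chid);

Functions that cached a local struct device *d = dev_from_gk20a(c->g)
now cache struct gk20a *g = c->g instead, so the logging calls no
longer touch Linux's struct device at all.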
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/channel_gk20a.c	49
1 file changed, 25 insertions(+), 24 deletions(-)
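To see why taking the driver's own struct makes the macros portable,
here is a toy, compilable sketch; the *_sketch names and macro bodies
are illustrative stand-ins, not the real definitions from
<nvgpu/log.h>:

	#include <stdio.h>

	struct gk20a {
		const char *name;	/* stand-in for the real driver struct */
	};

	/* Old shape: keyed on an OS-specific handle (struct device on
	 * Linux), so every caller and every OS port needs that type. */
	#define gk20a_err_sketch(dev_name, fmt, ...) \
		fprintf(stderr, "%s: " fmt "\n", (dev_name), ##__VA_ARGS__)

	/* New shape: keyed on the struct gk20a pointer the caller already
	 * holds, which exists on every OS the driver supports. */
	#define nvgpu_err_sketch(g, fmt, ...) \
		fprintf(stderr, "%s: " fmt "\n", (g)->name, ##__VA_ARGS__)

	int main(void)
	{
		struct gk20a g = { .name = "gk20a" };

		nvgpu_err_sketch(&g, "jobs not freed for channel %d", 3);
		return 0;
	}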
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 94d193ed..c684be1f 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -27,6 +27,7 @@
 #include <nvgpu/timers.h>
 #include <nvgpu/kmem.h>
 #include <nvgpu/dma.h>
+#include <nvgpu/log.h>
 
 #include "gk20a.h"
 #include "debug_gk20a.h"
@@ -301,7 +302,7 @@ int gk20a_wait_channel_idle(struct channel_gk20a *ch)
 	} while (!nvgpu_timeout_expired(&timeout));
 
 	if (!channel_idle) {
-		gk20a_err(dev_from_gk20a(ch->g), "jobs not freed for channel %d\n",
+		nvgpu_err(ch->g, "jobs not freed for channel %d\n",
 				ch->hw_chid);
 		return -EBUSY;
 	}
@@ -322,7 +323,7 @@ int gk20a_channel_set_runlist_interleave(struct channel_gk20a *ch,
 	int ret;
 
 	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		gk20a_err(dev_from_gk20a(g), "invalid operation for TSG!\n");
+		nvgpu_err(g, "invalid operation for TSG!\n");
 		return -EINVAL;
 	}
 
@@ -362,7 +363,7 @@ void gk20a_set_error_notifier_locked(struct channel_gk20a *ch, __u32 error)
 		ch->error_notifier->info32 = error;
 		ch->error_notifier->status = 0xffff;
 
-		gk20a_err(dev_from_gk20a(ch->g),
+		nvgpu_err(ch->g,
 			"error notifier set to %d for ch %d", error, ch->hw_chid);
 	}
 }
@@ -398,7 +399,7 @@ static void gk20a_wait_until_counter_is_N(
 				msecs_to_jiffies(5000)) > 0)
 			break;
 
-		gk20a_warn(dev_from_gk20a(ch->g),
+		nvgpu_warn(ch->g,
 			"%s: channel %d, still waiting, %s left: %d, waiting for: %d",
 			caller, ch->hw_chid, counter_name,
 			atomic_read(counter), wait_value);
@@ -476,7 +477,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 	nvgpu_spinlock_acquire(&ch->ref_obtain_lock);
 	if (!ch->referenceable) {
 		nvgpu_spinlock_release(&ch->ref_obtain_lock);
-		gk20a_err(dev_from_gk20a(ch->g),
+		nvgpu_err(ch->g,
 			"Extra %s() called to channel %u",
 			__func__, ch->hw_chid);
 		return;
@@ -795,7 +796,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
 	ch = allocate_channel(f);
 	if (ch == NULL) {
 		/* TBD: we want to make this virtualizable */
-		gk20a_err(dev_from_gk20a(g), "out of hw chids");
+		nvgpu_err(g, "out of hw chids");
 		return NULL;
 	}
 
@@ -813,7 +814,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
 	if (g->ops.fifo.alloc_inst(g, ch)) {
 		ch->g = NULL;
 		free_channel(f, ch);
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
 			"failed to open gk20a channel, out of inst mem");
 		return NULL;
 	}
@@ -873,7 +874,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
    used for inserting commands before/after user submitted buffers. */
 static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c)
 {
-	struct device *d = dev_from_gk20a(c->g);
+	struct gk20a *g = c->g;
 	struct vm_gk20a *ch_vm = c->vm;
 	struct priv_cmd_queue *q = &c->priv_cmd_q;
 	u32 size;
@@ -901,7 +902,7 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c)
 
 	err = nvgpu_dma_alloc_map_sys(ch_vm, size, &q->mem);
 	if (err) {
-		gk20a_err(d, "%s: memory allocation failed\n", __func__);
+		nvgpu_err(g, "%s: memory allocation failed\n", __func__);
 		goto clean_up;
 	}
 
@@ -938,7 +939,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
 	gk20a_dbg_fn("size %d", orig_size);
 
 	if (!e) {
-		gk20a_err(dev_from_gk20a(c->g),
+		nvgpu_err(c->g,
 			"ch %d: priv cmd entry is null",
 			c->hw_chid);
 		return -EINVAL;
@@ -1016,7 +1017,7 @@ static int channel_gk20a_alloc_job(struct channel_gk20a *c,
 	if (CIRC_SPACE(put, get, c->joblist.pre_alloc.length))
 		*job_out = &c->joblist.pre_alloc.jobs[put];
 	else {
-		gk20a_warn(dev_from_gk20a(c->g),
+		nvgpu_warn(c->g,
 				"out of job ringbuffer space\n");
 		err = -EAGAIN;
 	}
@@ -1231,7 +1232,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 
 	/* an address space needs to have been bound at this point. */
 	if (!gk20a_channel_as_bound(c)) {
-		gk20a_err(d,
+		nvgpu_err(g,
 			"not bound to an address space at time of gpfifo"
 			" allocation.");
 		return -EINVAL;
@@ -1239,7 +1240,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 	ch_vm = c->vm;
 
 	if (c->gpfifo.mem.size) {
-		gk20a_err(d, "channel %d :"
+		nvgpu_err(g, "channel %d :"
 			"gpfifo already allocated", c->hw_chid);
 		return -EEXIST;
 	}
@@ -1248,7 +1249,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 			gpfifo_size * sizeof(struct nvgpu_gpfifo),
 			&c->gpfifo.mem);
 	if (err) {
-		gk20a_err(d, "%s: memory allocation failed\n", __func__);
+		nvgpu_err(g, "%s: memory allocation failed\n", __func__);
 		goto clean_up;
 	}
 
@@ -1334,7 +1335,7 @@ clean_up_unmap:
 	nvgpu_dma_unmap_free(ch_vm, &c->gpfifo.mem);
 clean_up:
 	memset(&c->gpfifo, 0, sizeof(struct gpfifo_desc));
-	gk20a_err(d, "fail");
+	nvgpu_err(g, "fail");
 	return err;
 }
 
@@ -1607,7 +1608,7 @@ static void gk20a_channel_timeout_handler(struct channel_gk20a *ch)
 		return;
 	}
 
-	gk20a_err(dev_from_gk20a(g), "Job on channel %d timed out",
+	nvgpu_err(g, "Job on channel %d timed out",
 		ch->hw_chid);
 
 	gk20a_debug_dump(g->dev);
@@ -1761,7 +1762,7 @@ static void gk20a_channel_worker_process(struct gk20a *g, int *get)
 		 * other reasons than a channel added in the items list
 		 * currently, so warn and ack the message.
 		 */
-		gk20a_warn(g->dev, "Spurious worker event!");
+		nvgpu_warn(g, "Spurious worker event!");
 		++*get;
 		break;
 	}
@@ -1820,7 +1821,7 @@ int nvgpu_channel_worker_init(struct gk20a *g)
 	task = kthread_run(gk20a_channel_poll_worker, g,
 			"nvgpu_channel_poll_%s", g->name);
 	if (IS_ERR(task)) {
-		gk20a_err(g->dev, "failed to start channel poller thread");
+		nvgpu_err(g, "failed to start channel poller thread");
 		return PTR_ERR(task);
 	}
 	g->channel_worker.poll_task = task;
@@ -1853,7 +1854,7 @@ void gk20a_channel_worker_enqueue(struct channel_gk20a *ch)
 	 * one ref already, so can't fail.
 	 */
 	if (WARN_ON(!gk20a_channel_get(ch))) {
-		gk20a_warn(g->dev, "cannot get ch ref for worker!");
+		nvgpu_warn(g, "cannot get ch ref for worker!");
 		return;
 	}
 
@@ -1876,7 +1877,7 @@ void gk20a_channel_worker_enqueue(struct channel_gk20a *ch)
 int gk20a_free_priv_cmdbuf(struct channel_gk20a *c, struct priv_cmd_entry *e)
 {
 	struct priv_cmd_queue *q = &c->priv_cmd_q;
-	struct device *d = dev_from_gk20a(c->g);
+	struct gk20a *g = c->g;
 
 	if (!e)
 		return 0;
@@ -1885,7 +1886,7 @@ int gk20a_free_priv_cmdbuf(struct channel_gk20a *c, struct priv_cmd_entry *e)
 		/* read the entry's valid flag before reading its contents */
 		rmb();
 		if ((q->get != e->off) && e->off != 0)
-			gk20a_err(d, "requests out-of-order, ch=%d\n",
+			nvgpu_err(g, "requests out-of-order, ch=%d\n",
 				c->hw_chid);
 		q->get = e->off + e->size;
 	}
@@ -2416,7 +2417,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
 	 * So, add extra_entries in user request. Also, HW with fifo size N
 	 * can accept only N-1 entreis and so the below condition */
 	if (c->gpfifo.entry_num - 1 < num_entries + extra_entries) {
-		gk20a_err(d, "not enough gpfifo space allocated");
+		nvgpu_err(g, "not enough gpfifo space allocated");
 		return -ENOMEM;
 	}
 
@@ -2430,7 +2431,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
 
 	/* an address space needs to have been bound at this point. */
 	if (!gk20a_channel_as_bound(c)) {
-		gk20a_err(d,
+		nvgpu_err(g,
 			"not bound to an address space at time of gpfifo"
 			" submission.");
 		return -EINVAL;
@@ -2512,7 +2513,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
 	/* released by job cleanup via syncpt or sema interrupt */
 	err = gk20a_busy(g);
 	if (err) {
-		gk20a_err(d, "failed to host gk20a to submit gpfifo, process %s",
+		nvgpu_err(g, "failed to host gk20a to submit gpfifo, process %s",
 			current->comm);
 		return err;
 	}