-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c       | 58
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c  | 24
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c          | 57
-rw-r--r--  drivers/gpu/nvgpu/gk20a/sched_gk20a.c         | 23
-rw-r--r--  drivers/gpu/nvgpu/gk20a/tsg_gk20a.c           | 17
5 files changed, 87 insertions(+), 92 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index c09539ff..26fbd66e 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -26,7 +26,6 @@
 #include <linux/file.h>
 #include <linux/anon_inodes.h>
 #include <linux/dma-buf.h>
-#include <linux/vmalloc.h>
 #include <linux/circ_buf.h>
 
 #include <nvgpu/semaphore.h>
@@ -1244,7 +1243,7 @@ int gk20a_channel_release(struct inode *inode, struct file *filp)
 
 channel_release:
         gk20a_put(g);
-        kfree(filp->private_data);
+        nvgpu_kfree(g, filp->private_data);
         filp->private_data = NULL;
         return 0;
 }
@@ -1390,7 +1389,7 @@ static int __gk20a_channel_open(struct gk20a *g, struct file *filp, s32 runlist_
 
         trace_gk20a_channel_open(dev_name(g->dev));
 
-        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+        priv = nvgpu_kzalloc(g, sizeof(*priv));
         if (!priv) {
                 err = -ENOMEM;
                 goto free_ref;
@@ -1421,7 +1420,7 @@ static int __gk20a_channel_open(struct gk20a *g, struct file *filp, s32 runlist_
         return 0;
 
 fail_busy:
-        kfree(priv);
+        nvgpu_kfree(g, priv);
 free_ref:
         gk20a_put(g);
         return err;
@@ -1446,7 +1445,7 @@ int gk20a_channel_open_ioctl(struct gk20a *g,
         int err;
         int fd;
         struct file *file;
-        char *name;
+        char name[64];
         s32 runlist_id = args->in.runlist_id;
 
         err = get_unused_fd_flags(O_RDWR);
@@ -1454,15 +1453,10 @@ int gk20a_channel_open_ioctl(struct gk20a *g,
                 return err;
         fd = err;
 
-        name = kasprintf(GFP_KERNEL, "nvhost-%s-fd%d",
+        snprintf(name, sizeof(name), "nvhost-%s-fd%d",
                  dev_name(g->dev), fd);
-        if (!name) {
-                err = -ENOMEM;
-                goto clean_up;
-        }
 
         file = anon_inode_getfile(name, g->channel.cdev.ops, NULL, O_RDWR);
-        kfree(name);
         if (IS_ERR(file)) {
                 err = PTR_ERR(file);
                 goto clean_up;
@@ -1609,7 +1603,7 @@ static void free_priv_cmdbuf(struct channel_gk20a *c,
         if (channel_gk20a_is_prealloc_enabled(c))
                 memset(e, 0, sizeof(struct priv_cmd_entry));
         else
-                kfree(e);
+                nvgpu_kfree(c->g, e);
 }
 
 static int channel_gk20a_alloc_job(struct channel_gk20a *c,
@@ -1635,8 +1629,8 @@ static int channel_gk20a_alloc_job(struct channel_gk20a *c,
                         err = -EAGAIN;
                 }
         } else {
-                *job_out = kzalloc(sizeof(struct channel_gk20a_job),
-                                   GFP_KERNEL);
+                *job_out = nvgpu_kzalloc(c->g,
+                                         sizeof(struct channel_gk20a_job));
                 if (!*job_out)
                         err = -ENOMEM;
         }
@@ -1659,7 +1653,7 @@ static void channel_gk20a_free_job(struct channel_gk20a *c,
                 job->wait_cmd = wait_cmd;
                 job->incr_cmd = incr_cmd;
         } else
-                kfree(job);
+                nvgpu_kfree(c->g, job);
 }
 
 void channel_gk20a_joblist_lock(struct channel_gk20a *c)
@@ -1757,7 +1751,8 @@ static int channel_gk20a_prealloc_resources(struct channel_gk20a *c,
          */
         size = sizeof(struct channel_gk20a_job);
         if (num_jobs <= ULONG_MAX / size)
-                c->joblist.pre_alloc.jobs = vzalloc(num_jobs * size);
+                c->joblist.pre_alloc.jobs = nvgpu_vzalloc(c->g,
+                                                          num_jobs * size);
         if (!c->joblist.pre_alloc.jobs) {
                 err = -ENOMEM;
                 goto clean_up;
@@ -1770,7 +1765,7 @@ static int channel_gk20a_prealloc_resources(struct channel_gk20a *c,
          */
         size = sizeof(struct priv_cmd_entry);
         if (num_jobs <= ULONG_MAX / (size << 1))
-                entries = vzalloc((num_jobs << 1) * size);
+                entries = nvgpu_vzalloc(c->g, (num_jobs << 1) * size);
         if (!entries) {
                 err = -ENOMEM;
                 goto clean_up_joblist;
@@ -1799,9 +1794,9 @@ static int channel_gk20a_prealloc_resources(struct channel_gk20a *c,
         return 0;
 
 clean_up_priv_cmd:
-        vfree(entries);
+        nvgpu_vfree(c->g, entries);
 clean_up_joblist:
-        vfree(c->joblist.pre_alloc.jobs);
+        nvgpu_vfree(c->g, c->joblist.pre_alloc.jobs);
 clean_up:
         memset(&c->joblist.pre_alloc, 0, sizeof(c->joblist.pre_alloc));
         return err;
@@ -1809,8 +1804,8 @@ clean_up:
 
 static void channel_gk20a_free_prealloc_resources(struct channel_gk20a *c)
 {
-        vfree(c->joblist.pre_alloc.jobs[0].wait_cmd);
-        vfree(c->joblist.pre_alloc.jobs);
+        nvgpu_vfree(c->g, c->joblist.pre_alloc.jobs[0].wait_cmd);
+        nvgpu_vfree(c->g, c->joblist.pre_alloc.jobs);
         gk20a_free_fence_pool(c);
 
         /*
@@ -2910,8 +2905,8 @@ static int gk20a_submit_prepare_syncs(struct channel_gk20a *c,
         }
 
         if (!pre_alloc_enabled)
-                job->wait_cmd = kzalloc(sizeof(struct priv_cmd_entry),
-                                        GFP_KERNEL);
+                job->wait_cmd = nvgpu_kzalloc(g,
+                                              sizeof(struct priv_cmd_entry));
 
         if (!job->wait_cmd) {
                 err = -ENOMEM;
@@ -2951,8 +2946,7 @@ static int gk20a_submit_prepare_syncs(struct channel_gk20a *c,
                 goto clean_up_wait_cmd;
         }
         if (!pre_alloc_enabled)
-                job->incr_cmd = kzalloc(sizeof(struct priv_cmd_entry),
-                                        GFP_KERNEL);
+                job->incr_cmd = nvgpu_kzalloc(g, sizeof(struct priv_cmd_entry));
 
         if (!job->incr_cmd) {
                 err = -ENOMEM;
@@ -3520,7 +3514,7 @@ static int gk20a_event_id_release(struct inode *inode, struct file *filp)
 
         nvgpu_mutex_destroy(&event_id_data->lock);
         gk20a_put(g);
-        kfree(event_id_data);
+        nvgpu_kfree(g, event_id_data);
         filp->private_data = NULL;
 
         return 0;
@@ -3588,7 +3582,7 @@ static int gk20a_channel_event_id_enable(struct channel_gk20a *ch,
         int err = 0;
         int local_fd;
         struct file *file;
-        char *name;
+        char name[64];
         struct gk20a_event_id_data *event_id_data;
 
         g = gk20a_get(ch->g);
@@ -3608,18 +3602,16 @@ static int gk20a_channel_event_id_enable(struct channel_gk20a *ch,
                 goto free_ref;
         local_fd = err;
 
-        name = kasprintf(GFP_KERNEL, "nvgpu-event%d-fd%d",
+        snprintf(name, sizeof(name), "nvgpu-event%d-fd%d",
                  event_id, local_fd);
-
         file = anon_inode_getfile(name, &gk20a_event_id_ops,
                                   NULL, O_RDWR);
-        kfree(name);
         if (IS_ERR(file)) {
                 err = PTR_ERR(file);
                 goto clean_up;
         }
 
-        event_id_data = kzalloc(sizeof(*event_id_data), GFP_KERNEL);
+        event_id_data = nvgpu_kzalloc(ch->g, sizeof(*event_id_data));
         if (!event_id_data) {
                 err = -ENOMEM;
                 goto clean_up_file;
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
index b4870c33..11448094 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
@@ -19,6 +19,7 @@
 #include <linux/version.h>
 
 #include <nvgpu/semaphore.h>
+#include <nvgpu/kmem.h>
 
 #include "channel_sync_gk20a.h"
 #include "gk20a.h"
@@ -339,7 +340,7 @@ static void gk20a_channel_syncpt_destroy(struct gk20a_channel_sync *s)
                 container_of(s, struct gk20a_channel_syncpt, ops);
         nvhost_syncpt_set_min_eq_max_ext(sp->host1x_pdev, sp->id);
         nvhost_syncpt_put_ref_ext(sp->host1x_pdev, sp->id);
-        kfree(sp);
+        nvgpu_kfree(sp->c->g, sp);
 }
 
 static struct gk20a_channel_sync *
@@ -348,7 +349,7 @@ gk20a_channel_syncpt_create(struct channel_gk20a *c)
         struct gk20a_channel_syncpt *sp;
         char syncpt_name[32];
 
-        sp = kzalloc(sizeof(*sp), GFP_KERNEL);
+        sp = nvgpu_kzalloc(c->g, sizeof(*sp));
         if (!sp)
                 return NULL;
 
@@ -361,7 +362,7 @@ gk20a_channel_syncpt_create(struct channel_gk20a *c)
         sp->id = nvhost_get_syncpt_host_managed(sp->host1x_pdev,
                                                 c->hw_chid, syncpt_name);
         if (!sp->id) {
-                kfree(sp);
+                nvgpu_kfree(c->g, sp);
                 gk20a_err(c->g->dev, "failed to get free syncpt");
                 return NULL;
         }
@@ -464,12 +465,13 @@ void gk20a_channel_cancel_pending_sema_waits(struct gk20a *g)
                 list_del_init(&work->entry);
 
                 /*
-                 * Only kfree() work if the cancel is successful. Otherwise it's
-                 * in use by the gk20a_channel_semaphore_launcher() code.
+                 * Only nvgpu_kfree() work if the cancel is successful.
+                 * Otherwise it's in use by the
+                 * gk20a_channel_semaphore_launcher() code.
                  */
                 ret = sync_fence_cancel_async(work->fence, &work->waiter);
                 if (ret == 0)
-                        kfree(work);
+                        nvgpu_kfree(g, work);
         }
 }
 
@@ -503,7 +505,7 @@ static void gk20a_channel_semaphore_launcher(
         sync_fence_put(fence);
         nvgpu_semaphore_release(w->sema);
         nvgpu_semaphore_put(w->sema);
-        kfree(w);
+        nvgpu_kfree(g, w);
 }
 #endif
 
@@ -706,7 +708,7 @@ static int gk20a_channel_semaphore_wait_fd(
                 goto clean_up_sync_fence;
         }
 
-        w = kzalloc(sizeof(*w), GFP_KERNEL);
+        w = nvgpu_kzalloc(c->g, sizeof(*w));
         if (!w) {
                 err = -ENOMEM;
                 goto clean_up_priv_cmd;
@@ -766,7 +768,7 @@ clean_up_sema:
         nvgpu_semaphore_put(w->sema);
         nvgpu_semaphore_put(w->sema);
 clean_up_worker:
-        kfree(w);
+        nvgpu_kfree(c->g, w);
 clean_up_priv_cmd:
         gk20a_free_priv_cmdbuf(c, entry);
 clean_up_sync_fence:
@@ -917,7 +919,7 @@ static void gk20a_channel_semaphore_destroy(struct gk20a_channel_sync *s)
         /* The sema pool is cleaned up by the VM destroy. */
         sema->pool = NULL;
 
-        kfree(sema);
+        nvgpu_kfree(sema->c->g, sema);
 }
 
 static struct gk20a_channel_sync *
@@ -930,7 +932,7 @@ gk20a_channel_semaphore_create(struct channel_gk20a *c)
         if (WARN_ON(!c->vm))
                 return NULL;
 
-        sema = kzalloc(sizeof(*sema), GFP_KERNEL);
+        sema = nvgpu_kzalloc(c->g, sizeof(*sema));
         if (!sema)
                 return NULL;
         sema->c = c;
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 2fa939b9..b390daa4 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -18,7 +18,6 @@
  */
 
 #include <linux/delay.h>
-#include <linux/slab.h>
 #include <linux/scatterlist.h>
 #include <trace/events/gk20a.h>
 #include <linux/dma-mapping.h>
@@ -27,6 +26,7 @@
 
 #include <nvgpu/timers.h>
 #include <nvgpu/semaphore.h>
+#include <nvgpu/kmem.h>
 
 #include "gk20a.h"
 #include "debug_gk20a.h"
@@ -483,10 +483,10 @@ void gk20a_fifo_delete_runlist(struct fifo_gk20a *f)
                         gk20a_gmmu_free(g, &runlist->mem[i]);
                 }
 
-                kfree(runlist->active_channels);
+                nvgpu_kfree(g, runlist->active_channels);
                 runlist->active_channels = NULL;
 
-                kfree(runlist->active_tsgs);
+                nvgpu_kfree(g, runlist->active_tsgs);
                 runlist->active_tsgs = NULL;
 
                 nvgpu_mutex_destroy(&runlist->mutex);
@@ -495,7 +495,7 @@ void gk20a_fifo_delete_runlist(struct fifo_gk20a *f)
         memset(f->runlist_info, 0, (sizeof(struct fifo_runlist_info_gk20a) *
                 f->max_runlists));
 
-        kfree(f->runlist_info);
+        nvgpu_kfree(g, f->runlist_info);
         f->runlist_info = NULL;
         f->max_runlists = 0;
 }
@@ -538,8 +538,8 @@ static void gk20a_remove_fifo_support(struct fifo_gk20a *f)
 
         }
 
-        vfree(f->channel);
-        vfree(f->tsg);
+        nvgpu_vfree(g, f->channel);
+        nvgpu_vfree(g, f->tsg);
         if (g->ops.mm.is_bar1_supported(g))
                 gk20a_gmmu_unmap_free(&g->mm.bar1.vm, &f->userd);
         else
@@ -547,11 +547,11 @@ static void gk20a_remove_fifo_support(struct fifo_gk20a *f)
 
         gk20a_fifo_delete_runlist(f);
 
-        kfree(f->pbdma_map);
+        nvgpu_kfree(g, f->pbdma_map);
         f->pbdma_map = NULL;
-        kfree(f->engine_info);
+        nvgpu_kfree(g, f->engine_info);
         f->engine_info = NULL;
-        kfree(f->active_engines_list);
+        nvgpu_kfree(g, f->active_engines_list);
         f->active_engines_list = NULL;
 #ifdef CONFIG_DEBUG_FS
         nvgpu_mutex_acquire(&f->profile.lock);
@@ -654,8 +654,9 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
         gk20a_dbg_fn("");
 
         f->max_runlists = g->ops.fifo.eng_runlist_base_size();
-        f->runlist_info = kzalloc(sizeof(struct fifo_runlist_info_gk20a) *
-                                  f->max_runlists, GFP_KERNEL);
+        f->runlist_info = nvgpu_kzalloc(g,
+                                        sizeof(struct fifo_runlist_info_gk20a) *
+                                        f->max_runlists);
         if (!f->runlist_info)
                 goto clean_up_runlist;
 
@@ -666,14 +667,14 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
                 runlist = &f->runlist_info[runlist_id];
 
                 runlist->active_channels =
-                        kzalloc(DIV_ROUND_UP(f->num_channels, BITS_PER_BYTE),
-                                GFP_KERNEL);
+                        nvgpu_kzalloc(g, DIV_ROUND_UP(f->num_channels,
+                                                      BITS_PER_BYTE));
                 if (!runlist->active_channels)
                         goto clean_up_runlist;
 
                 runlist->active_tsgs =
-                        kzalloc(DIV_ROUND_UP(f->num_channels, BITS_PER_BYTE),
-                                GFP_KERNEL);
+                        nvgpu_kzalloc(g, DIV_ROUND_UP(f->num_channels,
+                                                      BITS_PER_BYTE));
                 if (!runlist->active_tsgs)
                         goto clean_up_runlist;
 
@@ -905,16 +906,14 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
 
         f->userd_entry_size = 1 << ram_userd_base_shift_v();
 
-        f->channel = vzalloc(f->num_channels * sizeof(*f->channel));
-        f->tsg = vzalloc(f->num_channels * sizeof(*f->tsg));
-        f->pbdma_map = kzalloc(f->num_pbdma * sizeof(*f->pbdma_map),
-                               GFP_KERNEL);
-        f->engine_info = kzalloc(f->max_engines * sizeof(*f->engine_info),
-                                 GFP_KERNEL);
-        f->active_engines_list = kzalloc(f->max_engines * sizeof(u32),
-                                         GFP_KERNEL);
+        f->channel = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->channel));
+        f->tsg = nvgpu_vzalloc(g, f->num_channels * sizeof(*f->tsg));
+        f->pbdma_map = nvgpu_kzalloc(g, f->num_pbdma * sizeof(*f->pbdma_map));
+        f->engine_info = nvgpu_kzalloc(g, f->max_engines *
+                                       sizeof(*f->engine_info));
+        f->active_engines_list = nvgpu_kzalloc(g, f->max_engines * sizeof(u32));
 
-        if (!(f->channel && f->pbdma_map && f->engine_info &&
+        if (!(f->channel && f->tsg && f->pbdma_map && f->engine_info &&
               f->active_engines_list)) {
                 err = -ENOMEM;
                 goto clean_up;
@@ -977,15 +976,15 @@ clean_up:
         else
                 gk20a_gmmu_free(g, &f->userd);
 
-        vfree(f->channel);
+        nvgpu_vfree(g, f->channel);
         f->channel = NULL;
-        vfree(f->tsg);
+        nvgpu_vfree(g, f->tsg);
         f->tsg = NULL;
-        kfree(f->pbdma_map);
+        nvgpu_kfree(g, f->pbdma_map);
         f->pbdma_map = NULL;
-        kfree(f->engine_info);
+        nvgpu_kfree(g, f->engine_info);
         f->engine_info = NULL;
-        kfree(f->active_engines_list);
+        nvgpu_kfree(g, f->active_engines_list);
         f->active_engines_list = NULL;
 
         return err;
diff --git a/drivers/gpu/nvgpu/gk20a/sched_gk20a.c b/drivers/gpu/nvgpu/gk20a/sched_gk20a.c
index a73e7993..6b372489 100644
--- a/drivers/gpu/nvgpu/gk20a/sched_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/sched_gk20a.c
@@ -12,7 +12,6 @@
  */
 
 #include <asm/barrier.h>
-#include <linux/slab.h>
 #include <linux/kthread.h>
 #include <linux/circ_buf.h>
 #include <linux/delay.h>
@@ -24,6 +23,8 @@
 #include <linux/log2.h>
 #include <uapi/linux/nvgpu.h>
 
+#include <nvgpu/kmem.h>
+
 #include "ctxsw_trace_gk20a.h"
 #include "gk20a.h"
 #include "gr_gk20a.h"
@@ -154,7 +155,7 @@ static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched,
                 return -ENOSPC;
         }
 
-        bitmap = kzalloc(sched->bitmap_size, GFP_KERNEL);
+        bitmap = nvgpu_kzalloc(sched->g, sched->bitmap_size);
         if (!bitmap)
                 return -ENOMEM;
 
@@ -172,7 +173,7 @@ static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched,
                 bitmap, sched->bitmap_size))
                 err = -EFAULT;
 
-        kfree(bitmap);
+        nvgpu_kfree(sched->g, bitmap);
 
         return err;
 }
@@ -650,15 +651,15 @@ int gk20a_sched_ctrl_init(struct gk20a *g)
         gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "g=%p sched=%p size=%zu",
                 g, sched, sched->bitmap_size);
 
-        sched->active_tsg_bitmap = kzalloc(sched->bitmap_size, GFP_KERNEL);
+        sched->active_tsg_bitmap = nvgpu_kzalloc(g, sched->bitmap_size);
         if (!sched->active_tsg_bitmap)
                 return -ENOMEM;
 
-        sched->recent_tsg_bitmap = kzalloc(sched->bitmap_size, GFP_KERNEL);
+        sched->recent_tsg_bitmap = nvgpu_kzalloc(g, sched->bitmap_size);
         if (!sched->recent_tsg_bitmap)
                 goto free_active;
 
-        sched->ref_tsg_bitmap = kzalloc(sched->bitmap_size, GFP_KERNEL);
+        sched->ref_tsg_bitmap = nvgpu_kzalloc(g, sched->bitmap_size);
         if (!sched->ref_tsg_bitmap)
                 goto free_recent;
 
@@ -672,10 +673,10 @@ int gk20a_sched_ctrl_init(struct gk20a *g)
         return 0;
 
 free_recent:
-        kfree(sched->recent_tsg_bitmap);
+        nvgpu_kfree(g, sched->recent_tsg_bitmap);
 
 free_active:
-        kfree(sched->active_tsg_bitmap);
+        nvgpu_kfree(g, sched->active_tsg_bitmap);
 
         return -ENOMEM;
 }
@@ -684,9 +685,9 @@ void gk20a_sched_ctrl_cleanup(struct gk20a *g)
 {
         struct gk20a_sched_ctrl *sched = &g->sched_ctrl;
 
-        kfree(sched->active_tsg_bitmap);
-        kfree(sched->recent_tsg_bitmap);
-        kfree(sched->ref_tsg_bitmap);
+        nvgpu_kfree(g, sched->active_tsg_bitmap);
+        nvgpu_kfree(g, sched->recent_tsg_bitmap);
+        nvgpu_kfree(g, sched->ref_tsg_bitmap);
         sched->active_tsg_bitmap = NULL;
         sched->recent_tsg_bitmap = NULL;
         sched->ref_tsg_bitmap = NULL;
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index 270fed85..21b50700 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -22,6 +22,8 @@
 #include <uapi/linux/nvgpu.h>
 #include <linux/anon_inodes.h>
 
+#include <nvgpu/kmem.h>
+
 #include "gk20a.h"
 
 #include <nvgpu/hw/gk20a/hw_ccsr_gk20a.h>
@@ -257,7 +259,7 @@ static int gk20a_tsg_event_id_enable(struct tsg_gk20a *tsg,
         int err = 0;
         int local_fd;
         struct file *file;
-        char *name;
+        char name[64];
         struct gk20a_event_id_data *event_id_data;
         struct gk20a *g;
 
@@ -278,18 +280,17 @@ static int gk20a_tsg_event_id_enable(struct tsg_gk20a *tsg,
                 goto free_ref;
         local_fd = err;
 
-        name = kasprintf(GFP_KERNEL, "nvgpu-event%d-fd%d",
+        snprintf(name, sizeof(name), "nvgpu-event%d-fd%d",
                  event_id, local_fd);
 
         file = anon_inode_getfile(name, &gk20a_event_id_ops,
                                   NULL, O_RDWR);
-        kfree(name);
         if (IS_ERR(file)) {
                 err = PTR_ERR(file);
                 goto clean_up;
         }
 
-        event_id_data = kzalloc(sizeof(*event_id_data), GFP_KERNEL);
+        event_id_data = nvgpu_kzalloc(tsg->g, sizeof(*event_id_data));
         if (!event_id_data) {
                 err = -ENOMEM;
                 goto clean_up_file;
@@ -428,7 +429,7 @@ int gk20a_tsg_open(struct gk20a *g, struct file *filp)
 
         gk20a_dbg(gpu_dbg_fn, "tsg: %s", dev_name(dev));
 
-        priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+        priv = nvgpu_kmalloc(g, sizeof(*priv));
         if (!priv) {
                 err = -ENOMEM;
                 goto free_ref;
@@ -436,7 +437,7 @@ int gk20a_tsg_open(struct gk20a *g, struct file *filp)
 
         tsg = acquire_unused_tsg(&g->fifo);
         if (!tsg) {
-                kfree(priv);
+                nvgpu_kfree(g, priv);
                 err = -ENOMEM;
                 goto free_ref;
         }
@@ -533,7 +534,7 @@ int gk20a_tsg_dev_release(struct inode *inode, struct file *filp)
         struct tsg_gk20a *tsg = priv->tsg;
 
         kref_put(&tsg->refcount, gk20a_tsg_release);
-        kfree(priv);
+        nvgpu_kfree(tsg->g, priv);
         return 0;
 }
 
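For reference, a minimal sketch of the allocation pattern these hunks converge on. It is illustrative only, not part of the patch: the example_* helpers are hypothetical, while the calling convention of nvgpu_kzalloc()/nvgpu_kfree() (struct gk20a * as the first argument, no GFP flags) is taken from the hunks above, and the error handling keeps the same shape the driver already uses.

/*
 * Illustrative sketch only: hypothetical helpers showing the
 * kzalloc()/kfree() -> nvgpu_kzalloc()/nvgpu_kfree() conversion pattern
 * applied throughout this diff.
 */
static int example_alloc_entry(struct gk20a *g, struct priv_cmd_entry **out)
{
        struct priv_cmd_entry *e;

        /* was: e = kzalloc(sizeof(*e), GFP_KERNEL); */
        e = nvgpu_kzalloc(g, sizeof(*e));
        if (!e)
                return -ENOMEM;

        *out = e;
        return 0;
}

static void example_free_entry(struct gk20a *g, struct priv_cmd_entry *e)
{
        /* was: kfree(e); */
        nvgpu_kfree(g, e);
}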