author    Terje Bergstrom <tbergstrom@nvidia.com>    2014-05-07 06:09:36 -0400
committer Dan Willemsen <dwillemsen@nvidia.com>      2015-03-18 15:09:20 -0400
commit    4ac110cb8a46a0c91a4a1e39c168de1b49688971 (patch)
tree      5cf832c256e8e7b4c28c2a08ed28614d44c00ea4 /drivers/gpu/nvgpu/gk20a
parent    4d93f777450e5bf46d5001426b3a437810cd852b (diff)
gpu: nvgpu: Register as subdomain of host1x
Add gk20a as a sub power domain of host1x. This enforces keeping host1x
on when using gk20a.

Bug 200003112

Change-Id: I08db595bc7b819d86d33fb98af0d8fb4de369463
Signed-off-by: Arto Merilainen <amerilainen@nvidia.com>
Reviewed-on: http://git-master/r/407543
Reviewed-by: Riham Haidar <rhaidar@nvidia.com>
Tested-by: Riham Haidar <rhaidar@nvidia.com>
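Note on the mechanism (editorial, not part of the commit): the sub power domain relationship described above maps onto the kernel's generic power domain (genpd) hierarchy, where a parent domain cannot be powered off while any of its subdomains is on. The driver itself goes through nvhost_register_client_domain() (see platform_gk20a_tegra.c below); the sketch here only illustrates the equivalent genpd call, and host1x_genpd/gk20a_genpd are hypothetical handles used for illustration.

/*
 * Illustrative sketch only -- not the driver's code path. Assumes two
 * generic_pm_domain handles; the real driver delegates this step to
 * nvhost_register_client_domain().
 */
#include <linux/pm_domain.h>

static int link_gk20a_under_host1x(struct generic_pm_domain *host1x_genpd,
				   struct generic_pm_domain *gk20a_genpd)
{
	/* host1x (parent) must stay powered whenever gk20a (subdomain) is on. */
	return pm_genpd_add_subdomain(host1x_genpd, gk20a_genpd);
}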
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/as_gk20a.c              |  4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c         | 50
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c    |  4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c         |  4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.c                 | 52
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.h                 |  2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c           |  4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/platform_gk20a.h        | 28
-rw-r--r--  drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c  | 39
9 files changed, 50 insertions, 137 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/as_gk20a.c b/drivers/gpu/nvgpu/gk20a/as_gk20a.c
index 65c26938..1d604b83 100644
--- a/drivers/gpu/nvgpu/gk20a/as_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/as_gk20a.c
@@ -227,7 +227,7 @@ long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		return -EFAULT;
 	}
 
-	err = gk20a_channel_busy(g->dev);
+	err = gk20a_busy(g->dev);
 	if (err)
 		return err;
 
@@ -284,7 +284,7 @@ long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		break;
 	}
 
-	gk20a_channel_idle(g->dev);
+	gk20a_idle(g->dev);
 
 	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
 		err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 7cf5fcde..1b7191db 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -697,14 +697,14 @@ int gk20a_channel_release(struct inode *inode, struct file *filp)
 
 	trace_gk20a_channel_release(dev_name(&g->dev->dev));
 
-	err = gk20a_channel_busy(ch->g->dev);
+	err = gk20a_busy(ch->g->dev);
 	if (err) {
 		gk20a_err(dev_from_gk20a(g), "failed to release channel %d",
 			ch->hw_chid);
 		return err;
 	}
 	gk20a_free_channel(ch, true);
-	gk20a_channel_idle(ch->g->dev);
+	gk20a_idle(ch->g->dev);
 
 	gk20a_put_client(g);
 	filp->private_data = NULL;
@@ -767,14 +767,14 @@ static int __gk20a_channel_open(struct gk20a *g, struct file *filp)
 		return err;
 	}
 
-	err = gk20a_channel_busy(g->dev);
+	err = gk20a_busy(g->dev);
 	if (err) {
 		gk20a_put_client(g);
 		gk20a_err(dev_from_gk20a(g), "failed to power on, %d", err);
 		return err;
 	}
 	ch = gk20a_open_new_channel(g);
-	gk20a_channel_idle(g->dev);
+	gk20a_idle(g->dev);
 	if (!ch) {
 		gk20a_put_client(g);
 		gk20a_err(dev_from_gk20a(g),
@@ -1408,12 +1408,12 @@ void gk20a_channel_update(struct channel_gk20a *c, int nr_completed)
 
 		list_del_init(&job->list);
 		kfree(job);
-		gk20a_channel_idle(g->dev);
+		gk20a_idle(g->dev);
 	}
 	mutex_unlock(&c->jobs_lock);
 
 	for (i = 0; i < nr_completed; i++)
-		gk20a_channel_idle(c->g->dev);
+		gk20a_idle(c->g->dev);
 }
 
 static int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
@@ -1455,7 +1455,7 @@ static int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
 	gk20a_dbg_info("channel %d", c->hw_chid);
 
 	/* gk20a_channel_update releases this ref. */
-	err = gk20a_channel_busy(g->dev);
+	err = gk20a_busy(g->dev);
 	if (err) {
 		gk20a_err(d, "failed to host gk20a to submit gpfifo");
 		return err;
@@ -1606,7 +1606,7 @@ clean_up:
 	gk20a_err(d, "fail");
 	free_priv_cmdbuf(c, wait_cmd);
 	free_priv_cmdbuf(c, incr_cmd);
-	gk20a_channel_idle(g->dev);
+	gk20a_idle(g->dev);
 	return err;
 }
 
@@ -1856,7 +1856,6 @@ int gk20a_channel_suspend(struct gk20a *g)
 	for (chid = 0; chid < f->num_channels; chid++) {
 		struct channel_gk20a *c = &f->channel[chid];
 		if (c->in_use && c->obj_class != KEPLER_C) {
-			gk20a_platform_channel_busy(g->dev);
 			err = gk20a_channel_submit_wfi(c);
 			if (err) {
 				gk20a_err(d, "cannot idle channel %d\n",
@@ -1866,7 +1865,6 @@ int gk20a_channel_suspend(struct gk20a *g)
 
 			c->sync->wait_cpu(c->sync, &c->last_submit_fence,
 					500000);
-			gk20a_platform_channel_idle(g->dev);
 			break;
 		}
 	}
@@ -2037,7 +2035,7 @@ long gk20a_channel_ioctl(struct file *filp,
 	case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
 		break;
 	case NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
-		err = gk20a_channel_busy(dev);
+		err = gk20a_busy(dev);
 		if (err) {
 			dev_err(&dev->dev,
 				"%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -2046,10 +2044,10 @@ long gk20a_channel_ioctl(struct file *filp,
 		}
 		err = gk20a_alloc_obj_ctx(ch,
 				(struct nvhost_alloc_obj_ctx_args *)buf);
-		gk20a_channel_idle(dev);
+		gk20a_idle(dev);
 		break;
 	case NVHOST_IOCTL_CHANNEL_FREE_OBJ_CTX:
-		err = gk20a_channel_busy(dev);
+		err = gk20a_busy(dev);
 		if (err) {
 			dev_err(&dev->dev,
 				"%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -2058,10 +2056,10 @@ long gk20a_channel_ioctl(struct file *filp,
 		}
 		err = gk20a_free_obj_ctx(ch,
 				(struct nvhost_free_obj_ctx_args *)buf);
-		gk20a_channel_idle(dev);
+		gk20a_idle(dev);
 		break;
 	case NVHOST_IOCTL_CHANNEL_ALLOC_GPFIFO:
-		err = gk20a_channel_busy(dev);
+		err = gk20a_busy(dev);
 		if (err) {
 			dev_err(&dev->dev,
 				"%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -2070,14 +2068,14 @@ long gk20a_channel_ioctl(struct file *filp,
 		}
 		err = gk20a_alloc_channel_gpfifo(ch,
 				(struct nvhost_alloc_gpfifo_args *)buf);
-		gk20a_channel_idle(dev);
+		gk20a_idle(dev);
 		break;
 	case NVHOST_IOCTL_CHANNEL_SUBMIT_GPFIFO:
 		err = gk20a_ioctl_channel_submit_gpfifo(ch,
 				(struct nvhost_submit_gpfifo_args *)buf);
 		break;
 	case NVHOST_IOCTL_CHANNEL_WAIT:
-		err = gk20a_channel_busy(dev);
+		err = gk20a_busy(dev);
 		if (err) {
 			dev_err(&dev->dev,
 				"%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -2086,10 +2084,10 @@ long gk20a_channel_ioctl(struct file *filp,
 		}
 		err = gk20a_channel_wait(ch,
 				(struct nvhost_wait_args *)buf);
-		gk20a_channel_idle(dev);
+		gk20a_idle(dev);
 		break;
 	case NVHOST_IOCTL_CHANNEL_ZCULL_BIND:
-		err = gk20a_channel_busy(dev);
+		err = gk20a_busy(dev);
 		if (err) {
 			dev_err(&dev->dev,
 				"%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -2098,10 +2096,10 @@ long gk20a_channel_ioctl(struct file *filp,
 		}
 		err = gk20a_channel_zcull_bind(ch,
 				(struct nvhost_zcull_bind_args *)buf);
-		gk20a_channel_idle(dev);
+		gk20a_idle(dev);
 		break;
 	case NVHOST_IOCTL_CHANNEL_SET_ERROR_NOTIFIER:
-		err = gk20a_channel_busy(dev);
+		err = gk20a_busy(dev);
 		if (err) {
 			dev_err(&dev->dev,
 				"%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -2110,11 +2108,11 @@ long gk20a_channel_ioctl(struct file *filp,
 		}
 		err = gk20a_init_error_notifier(ch,
 				(struct nvhost_set_error_notifier *)buf);
-		gk20a_channel_idle(dev);
+		gk20a_idle(dev);
 		break;
 #ifdef CONFIG_GK20A_CYCLE_STATS
 	case NVHOST_IOCTL_CHANNEL_CYCLE_STATS:
-		err = gk20a_channel_busy(dev);
+		err = gk20a_busy(dev);
 		if (err) {
 			dev_err(&dev->dev,
 				"%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -2123,7 +2121,7 @@ long gk20a_channel_ioctl(struct file *filp,
 		}
 		err = gk20a_channel_cycle_stats(ch,
 				(struct nvhost_cycle_stats_args *)buf);
-		gk20a_channel_idle(dev);
+		gk20a_idle(dev);
 		break;
 #endif
 	case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
@@ -2153,7 +2151,7 @@ long gk20a_channel_ioctl(struct file *filp,
 			ch->has_timedout;
 		break;
 	case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
-		err = gk20a_channel_busy(dev);
+		err = gk20a_busy(dev);
 		if (err) {
 			dev_err(&dev->dev,
 				"%s: failed to host gk20a for ioctl cmd: 0x%x",
@@ -2162,7 +2160,7 @@ long gk20a_channel_ioctl(struct file *filp,
 		}
 		gk20a_channel_set_priority(ch,
 			((struct nvhost_set_priority_args *)buf)->priority);
-		gk20a_channel_idle(dev);
+		gk20a_idle(dev);
 		break;
 	default:
 		dev_err(&dev->dev, "unrecognized ioctl cmd: 0x%x", cmd);
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
index 525d8638..841ec596 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
@@ -227,14 +227,14 @@ static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
 
 	if (register_irq) {
 		/* nvhost action_gpfifo_submit_complete releases this ref. */
-		err = gk20a_channel_busy(c->g->dev);
+		err = gk20a_busy(c->g->dev);
 
 		if (!err) {
 			err = nvhost_intr_register_notifier(sp->host1x_pdev,
 					sp->id, thresh,
 					gk20a_channel_syncpt_update, c);
 			if (err)
-				gk20a_channel_idle(c->g->dev);
+				gk20a_idle(c->g->dev);
 		}
 
 		/* Adding interrupt action should never fail. A proper error
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
index bc5dc4c1..ed07d9ca 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
@@ -555,7 +555,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s,
 
 		gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "module busy");
 		gk20a_busy(g->dev);
-		err = gk20a_channel_busy(dbg_s->pdev);
+		err = gk20a_busy(dbg_s->pdev);
 		if (err)
 			return -EPERM;
 
@@ -600,7 +600,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s,
 		gk20a_pmu_enable_elpg(g);
 
 		gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "module idle");
-		gk20a_channel_idle(dbg_s->pdev);
+		gk20a_idle(dbg_s->pdev);
 		gk20a_idle(g->dev);
 	}
 
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c
index 0e117b69..f04e9187 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.c
@@ -739,10 +739,10 @@ void gk20a_put_client(struct gk20a *g)
 	WARN_ON(g->client_refcount < 0);
 }
 
-static int gk20a_pm_prepare_poweroff(struct device *_dev)
+static int gk20a_pm_prepare_poweroff(struct device *dev)
 {
-	struct platform_device *dev = to_platform_device(_dev);
-	struct gk20a *g = get_gk20a(dev);
+	struct platform_device *pdev = to_platform_device(dev);
+	struct gk20a *g = get_gk20a(pdev);
 	int ret = 0;
 
 	gk20a_dbg_fn("");
@@ -767,6 +767,7 @@ static int gk20a_pm_prepare_poweroff(struct device *_dev)
 
 	/* Disable GPCPLL */
 	ret |= gk20a_suspend_clk_support(g);
+
 	g->power_on = false;
 
 	return ret;
@@ -790,10 +791,10 @@ static void gk20a_detect_chip(struct gk20a *g)
 		g->gpu_characteristics.rev);
 }
 
-static int gk20a_pm_finalize_poweron(struct device *_dev)
+static int gk20a_pm_finalize_poweron(struct device *dev)
 {
-	struct platform_device *dev = to_platform_device(_dev);
-	struct gk20a *g = get_gk20a(dev);
+	struct platform_device *pdev = to_platform_device(dev);
+	struct gk20a *g = get_gk20a(pdev);
 	int err, nice_value;
 
 	gk20a_dbg_fn("");
@@ -846,7 +847,7 @@ static int gk20a_pm_finalize_poweron(struct device *_dev)
 	   saving features (blcg/slcg) are enabled. For now, do it here. */
 	err = gk20a_init_clk_support(g);
 	if (err) {
-		gk20a_err(&dev->dev, "failed to init gk20a clk");
+		gk20a_err(dev, "failed to init gk20a clk");
 		goto done;
 	}
 
@@ -865,49 +866,49 @@ static int gk20a_pm_finalize_poweron(struct device *_dev)
 
 	err = gk20a_init_fifo_reset_enable_hw(g);
 	if (err) {
-		gk20a_err(&dev->dev, "failed to reset gk20a fifo");
+		gk20a_err(dev, "failed to reset gk20a fifo");
 		goto done;
 	}
 
 	err = gk20a_init_mm_support(g);
 	if (err) {
-		gk20a_err(&dev->dev, "failed to init gk20a mm");
+		gk20a_err(dev, "failed to init gk20a mm");
 		goto done;
 	}
 
 	err = gk20a_init_pmu_support(g);
 	if (err) {
-		gk20a_err(&dev->dev, "failed to init gk20a pmu");
+		gk20a_err(dev, "failed to init gk20a pmu");
 		goto done;
 	}
 
 	err = gk20a_init_fifo_support(g);
 	if (err) {
-		gk20a_err(&dev->dev, "failed to init gk20a fifo");
+		gk20a_err(dev, "failed to init gk20a fifo");
 		goto done;
 	}
 
 	err = gk20a_init_gr_support(g);
 	if (err) {
-		gk20a_err(&dev->dev, "failed to init gk20a gr");
+		gk20a_err(dev, "failed to init gk20a gr");
 		goto done;
 	}
 
 	err = gk20a_init_pmu_setup_hw2(g);
 	if (err) {
-		gk20a_err(&dev->dev, "failed to init gk20a pmu_hw2");
+		gk20a_err(dev, "failed to init gk20a pmu_hw2");
 		goto done;
 	}
 
 	err = gk20a_init_therm_support(g);
 	if (err) {
-		gk20a_err(&dev->dev, "failed to init gk20a therm");
+		gk20a_err(dev, "failed to init gk20a therm");
 		goto done;
 	}
 
 	err = gk20a_init_gpu_characteristics(g);
 	if (err) {
-		gk20a_err(&dev->dev, "failed to init gk20a gpu characteristics");
+		gk20a_err(dev, "failed to init gk20a gpu characteristics");
 		goto done;
 	}
 
@@ -1507,27 +1508,6 @@ void gk20a_busy_noresume(struct platform_device *pdev)
 	pm_runtime_get_noresume(&pdev->dev);
 }
 
-int gk20a_channel_busy(struct platform_device *pdev)
-{
-	int ret = 0;
-
-	ret = gk20a_platform_channel_busy(pdev);
-	if (ret)
-		return ret;
-
-	ret = gk20a_busy(pdev);
-	if (ret)
-		gk20a_platform_channel_idle(pdev);
-
-	return ret;
-}
-
-void gk20a_channel_idle(struct platform_device *pdev)
-{
-	gk20a_idle(pdev);
-	gk20a_platform_channel_idle(pdev);
-}
-
 int gk20a_busy(struct platform_device *pdev)
 {
 	int ret = 0;
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index e071573d..48b0f0a6 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -549,8 +549,6 @@ int clk_gk20a_debugfs_init(struct platform_device *dev);
 void gk20a_busy_noresume(struct platform_device *pdev);
 int gk20a_busy(struct platform_device *pdev);
 void gk20a_idle(struct platform_device *pdev);
-int gk20a_channel_busy(struct platform_device *pdev);
-void gk20a_channel_idle(struct platform_device *pdev);
 void gk20a_disable(struct gk20a *g, u32 units);
 void gk20a_enable(struct gk20a *g, u32 units);
 void gk20a_reset(struct gk20a *g, u32 units);
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c b/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c
index a5267eac..335c3f3b 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c
@@ -279,7 +279,7 @@ static ssize_t elpg_enable_store(struct device *device,
 	 * Since elpg is refcounted, we should not unnecessarily call
 	 * enable/disable if it is already so.
 	 */
-	err = gk20a_channel_busy(g->dev);
+	err = gk20a_busy(g->dev);
 	if (err)
 		return -EAGAIN;
 
@@ -290,7 +290,7 @@ static ssize_t elpg_enable_store(struct device *device,
 		g->elpg_enabled = false;
 		gk20a_pmu_disable_elpg(g);
 	}
-	gk20a_channel_idle(g->dev);
+	gk20a_idle(g->dev);
 
 	dev_info(device, "ELPG is %s.\n", g->elpg_enabled ? "enabled" :
 			"disabled");
diff --git a/drivers/gpu/nvgpu/gk20a/platform_gk20a.h b/drivers/gpu/nvgpu/gk20a/platform_gk20a.h
index 09f348cb..90333eef 100644
--- a/drivers/gpu/nvgpu/gk20a/platform_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/platform_gk20a.h
@@ -71,17 +71,6 @@ struct gk20a_platform {
 	 */
 	int (*late_probe)(struct platform_device *dev);
 
-	/* Called before submitting work to the gpu. The platform may use this
-	 * hook to ensure that any other hw modules that the gpu depends on are
-	 * powered. The platform implementation must count refs to this call. */
-	int (*channel_busy)(struct platform_device *dev);
-
-	/* Called after the work on the gpu is completed. The platform may use
-	 * this hook to release power refs to any other hw modules that the gpu
-	 * depends on. The platform implementation must count refs to this
-	 * call. */
-	void (*channel_idle)(struct platform_device *dev);
-
 	/* This function is called to allocate secure memory (memory that the
 	 * CPU cannot see). The function should fill the context buffer
 	 * descriptor (especially fields destroy, sgt, size).
@@ -134,23 +123,6 @@ extern struct gk20a_platform gk20a_generic_platform;
 extern struct gk20a_platform gk20a_tegra_platform;
 #endif
 
-static inline int gk20a_platform_channel_busy(struct platform_device *dev)
-{
-	struct gk20a_platform *p = gk20a_get_platform(dev);
-	int ret = 0;
-	if (p->channel_busy)
-		ret = p->channel_busy(dev);
-
-	return ret;
-}
-
-static inline void gk20a_platform_channel_idle(struct platform_device *dev)
-{
-	struct gk20a_platform *p = gk20a_get_platform(dev);
-	if (p->channel_idle)
-		p->channel_idle(dev);
-}
-
 static inline bool gk20a_platform_has_syncpoints(struct platform_device *dev)
 {
 	struct gk20a_platform *p = gk20a_get_platform(dev);
diff --git a/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c b/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c
index a5d5ad3e..ea49a124 100644
--- a/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c
+++ b/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c
@@ -92,37 +92,6 @@ int FXDIV(int x, int y)
 	return (x << pos) / y;
 }
 
-static int gk20a_tegra_channel_busy(struct platform_device *dev)
-{
-	int ret = 0;
-
-	/* Explicitly turn on the host1x clocks
-	 * - This is needed as host1x driver sets ignore_children = true
-	 *   to cater the use case of display clock ON but host1x clock OFF
-	 *   in OS-Idle-Display-ON case
-	 * - This was easily done in ACM as it only checked the ref count
-	 *   of host1x (or any device for that matter) to be zero before
-	 *   turning off its clock
-	 * - However, runtime PM checks to see if *ANY* child of device is
-	 *   in ACTIVE state and if yes, it doesn't suspend the parent. As a
-	 *   result of this, display && host1x clocks remains ON during
-	 *   OS-Idle-Display-ON case
-	 * - The code below fixes this use-case
-	 */
-	if (to_platform_device(dev->dev.parent))
-		ret = nvhost_module_busy_ext(
-			to_platform_device(dev->dev.parent));
-
-	return ret;
-}
-
-static void gk20a_tegra_channel_idle(struct platform_device *dev)
-{
-	/* Explicitly turn off the host1x clocks */
-	if (to_platform_device(dev->dev.parent))
-		nvhost_module_idle_ext(to_platform_device(dev->dev.parent));
-}
-
 static void gk20a_tegra_secure_destroy(struct platform_device *pdev,
 				       struct gr_ctx_buffer_desc *desc)
 {
@@ -455,8 +424,8 @@ static int gk20a_tegra_late_probe(struct platform_device *dev)
 {
 	struct gk20a_platform *platform = gk20a_get_platform(dev);
 
-	/* Make gk20a power domain a subdomain of mc */
-	tegra_pd_add_sd(&platform->g->pd);
+	/* Make gk20a power domain a subdomain of host1x */
+	nvhost_register_client_domain(&platform->g->pd);
 
 	/* Initialise tegra specific scaling quirks */
 	gk20a_tegra_scale_init(dev);
@@ -520,8 +489,6 @@ struct gk20a_platform t132_gk20a_tegra_platform = {
 	.devfreq_governor = "nvhost_podgov",
 	.qos_id = PM_QOS_GPU_FREQ_MIN,
 
-	.channel_busy = gk20a_tegra_channel_busy,
-	.channel_idle = gk20a_tegra_channel_idle,
 	.secure_alloc = gk20a_tegra_secure_alloc,
 	.dump_platform_dependencies = gk20a_tegra_debug_dump,
 };
@@ -548,8 +515,6 @@ struct gk20a_platform gk20a_tegra_platform = {
 	.devfreq_governor = "nvhost_podgov",
 	.qos_id = PM_QOS_GPU_FREQ_MIN,
 
-	.channel_busy = gk20a_tegra_channel_busy,
-	.channel_idle = gk20a_tegra_channel_idle,
 	.secure_alloc = gk20a_tegra_secure_alloc,
 	.dump_platform_dependencies = gk20a_tegra_debug_dump,
 };
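Editorial note: with the channel_busy/channel_idle hooks removed, call sites that previously had to take an explicit host1x reference are left with a plain gk20a_busy()/gk20a_idle() pair; the subdomain registration above is what now keeps host1x clocked for the duration. A rough sketch of the resulting call pattern, reusing the gk20a_busy()/gk20a_idle() prototypes from gk20a.h in this diff (the wrapper function itself is illustrative only, and gk20a.h is assumed to be in scope):

/* Illustrative only: shape of a power-scoped operation after this change. */
static long example_power_scoped_op(struct gk20a *g)
{
	int err;

	/* Powers up gk20a; host1x is held on via the subdomain link. */
	err = gk20a_busy(g->dev);
	if (err)
		return err;

	/* ... touch gk20a registers or submit work here ... */

	/* Drop the reference; host1x may suspend once gk20a is idle. */
	gk20a_idle(g->dev);
	return 0;
}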