author		Konsta Holtta <kholtta@nvidia.com>	2014-09-29 06:16:15 -0400
committer	Dan Willemsen <dwillemsen@nvidia.com>	2015-03-18 15:11:33 -0400
commit		719923ad9fa7c6b2ca68a25d1ce4518aab844bc2 (patch)
tree		bcb3dfbbd2968bf4b863f8990c11f05bc61ed6df /drivers/gpu/nvgpu/gk20a/channel_gk20a.c
parent		83bf2aa83d922080884a9fe547b656e24495e16e (diff)
gpu: nvgpu: rename gpu ioctls and structs to nvgpu
To help remove the nvhost dependency from nvgpu, rename ioctl defines
and structures used by nvgpu such that nvhost is replaced by nvgpu.
Duplicate some structures as needed.

Update header guards and such accordingly.

Change-Id: Ifc3a867713072bae70256502735583ab38381877
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: http://git-master/r/542620
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
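[Editor's note] Beyond the mechanical NVHOST_ -> NVGPU_ prefix swap, the diff
below also renames the syncpt_id field of the old nvhost_fence to the more
neutral id in struct nvgpu_fence, since with
NVGPU_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE the same field carries a sync-fence fd
rather than a syncpoint id. A minimal sketch of the renamed uapi struct,
assuming the layout implied by the fence->id / fence->value usage in this
diff (the field comments are illustrative, not quoted from the header):

	/* Sketch only: layout assumed from usage in
	 * gk20a_submit_channel_gpfifo() below. */
	struct nvgpu_fence {
		__u32 id;	/* syncpoint id, or sync-fence fd when
				 * NVGPU_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE
				 * is set */
		__u32 value;	/* syncpoint threshold; unused when id
				 * holds a sync-fence fd */
	};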
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/channel_gk20a.c	166
1 file changed, 82 insertions(+), 84 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 34c95483..0e8eb497 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -1,6 +1,4 @@
 /*
- * drivers/video/tegra/host/gk20a/channel_gk20a.c
- *
  * GK20A Graphics channel
  *
  * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
@@ -14,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/nvhost.h>
@@ -497,15 +494,15 @@ static void gk20a_free_cycle_stats_buffer(struct channel_gk20a *ch)
 }
 
 static int gk20a_channel_cycle_stats(struct channel_gk20a *ch,
-			struct nvhost_cycle_stats_args *args)
+			struct nvgpu_cycle_stats_args *args)
 {
 	struct dma_buf *dmabuf;
 	void *virtual_address;
 
-	if (args->nvmap_handle && !ch->cyclestate.cyclestate_buffer_handler) {
+	if (args->dmabuf_fd && !ch->cyclestate.cyclestate_buffer_handler) {
 
 		/* set up new cyclestats buffer */
-		dmabuf = dma_buf_get(args->nvmap_handle);
+		dmabuf = dma_buf_get(args->dmabuf_fd);
 		if (IS_ERR(dmabuf))
 			return PTR_ERR(dmabuf);
 		virtual_address = dma_buf_vmap(dmabuf);
@@ -517,12 +514,12 @@ static int gk20a_channel_cycle_stats(struct channel_gk20a *ch,
 		ch->cyclestate.cyclestate_buffer_size = dmabuf->size;
 		return 0;
 
-	} else if (!args->nvmap_handle &&
+	} else if (!args->dmabuf_fd &&
 			ch->cyclestate.cyclestate_buffer_handler) {
 		gk20a_free_cycle_stats_buffer(ch);
 		return 0;
 
-	} else if (!args->nvmap_handle &&
+	} else if (!args->dmabuf_fd &&
 			!ch->cyclestate.cyclestate_buffer_handler) {
 		/* no requst from GL */
 		return 0;
@@ -535,7 +532,7 @@ static int gk20a_channel_cycle_stats(struct channel_gk20a *ch,
 #endif
 
 static int gk20a_init_error_notifier(struct channel_gk20a *ch,
-		struct nvhost_set_error_notifier *args) {
+		struct nvgpu_set_error_notifier *args) {
 	void *va;
 
 	struct dma_buf *dmabuf;
@@ -566,7 +563,7 @@ static int gk20a_init_error_notifier(struct channel_gk20a *ch,
 	ch->error_notifier_ref = dmabuf;
 	ch->error_notifier = va + args->offset;
 	ch->error_notifier_va = va;
-	memset(ch->error_notifier, 0, sizeof(struct nvhost_notification));
+	memset(ch->error_notifier, 0, sizeof(struct nvgpu_notification));
 	return 0;
 }
 
@@ -1104,8 +1101,9 @@ static void recycle_priv_cmdbuf(struct channel_gk20a *c)
 	gk20a_dbg_fn("done");
 }
 
+
 int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
-		struct nvhost_alloc_gpfifo_args *args)
+		struct nvgpu_alloc_gpfifo_args *args)
 {
 	struct gk20a *g = c->g;
 	struct device *d = dev_from_gk20a(g);
@@ -1119,7 +1117,7 @@ int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
 	   and another one after, for internal usage. Triple the requested size. */
 	gpfifo_size = roundup_pow_of_two(args->num_entries * 3);
 
-	if (args->flags & NVHOST_ALLOC_GPFIFO_FLAGS_VPR_ENABLED)
+	if (args->flags & NVGPU_ALLOC_GPFIFO_FLAGS_VPR_ENABLED)
 		c->vpr = true;
 
 	/* an address space needs to have been bound at this point. */
@@ -1496,10 +1494,10 @@ void add_wait_cmd(u32 *ptr, u32 id, u32 thresh)
 }
 
 int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
-				struct nvhost_gpfifo *gpfifo,
+				struct nvgpu_gpfifo *gpfifo,
 				u32 num_entries,
 				u32 flags,
-				struct nvhost_fence *fence,
+				struct nvgpu_fence *fence,
 				struct gk20a_fence **fence_out)
 {
 	struct gk20a *g = c->g;
@@ -1514,13 +1512,13 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
 	/* we might need two extra gpfifo entries - one for pre fence
 	 * and one for post fence. */
 	const int extra_entries = 2;
-	bool need_wfi = !(flags & NVHOST_SUBMIT_GPFIFO_FLAGS_SUPPRESS_WFI);
+	bool need_wfi = !(flags & NVGPU_SUBMIT_GPFIFO_FLAGS_SUPPRESS_WFI);
 
 	if (c->has_timedout)
 		return -ETIMEDOUT;
 
-	if ((flags & (NVHOST_SUBMIT_GPFIFO_FLAGS_FENCE_WAIT |
-			NVHOST_SUBMIT_GPFIFO_FLAGS_FENCE_GET)) &&
+	if ((flags & (NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_WAIT |
+			NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET)) &&
 	    !fence)
 		return -EINVAL;
 
@@ -1551,7 +1549,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
 			c->hw_chid,
 			num_entries,
 			flags,
-			fence ? fence->syncpt_id : 0,
+			fence ? fence->id : 0,
 			fence ? fence->value : 0);
 	check_gp_put(g, c);
 	update_gp_get(g, c);
@@ -1603,13 +1601,13 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
 	 * the only reason this isn't being unceremoniously killed is to
 	 * keep running some tests which trigger this condition
 	 */
-	if (flags & NVHOST_SUBMIT_GPFIFO_FLAGS_FENCE_WAIT) {
-		if (flags & NVHOST_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE) {
-			wait_fence_fd = fence->syncpt_id;
+	if (flags & NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_WAIT) {
+		if (flags & NVGPU_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE) {
+			wait_fence_fd = fence->id;
 			err = c->sync->wait_fd(c->sync, wait_fence_fd,
 					&wait_cmd, &pre_fence);
 		} else {
-			err = c->sync->wait_syncpt(c->sync, fence->syncpt_id,
+			err = c->sync->wait_syncpt(c->sync, fence->id,
 					fence->value, &wait_cmd, &pre_fence);
 		}
 	}
@@ -1621,7 +1619,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
 
 	/* always insert syncpt increment at end of gpfifo submission
 	   to keep track of method completion for idle railgating */
-	if (flags & NVHOST_SUBMIT_GPFIFO_FLAGS_FENCE_GET)
+	if (flags & NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET)
 		err = c->sync->incr_user(c->sync, wait_fence_fd, &incr_cmd,
 				 &post_fence, need_wfi);
 	else
@@ -1822,7 +1820,7 @@ cleanup_put:
 }
 
 static int gk20a_channel_wait(struct channel_gk20a *ch,
-		struct nvhost_wait_args *args)
+		struct nvgpu_wait_args *args)
 {
 	struct device *d = dev_from_gk20a(ch->g);
 	struct dma_buf *dmabuf;
@@ -1839,14 +1837,14 @@ static int gk20a_channel_wait(struct channel_gk20a *ch,
 	if (ch->has_timedout)
 		return -ETIMEDOUT;
 
-	if (args->timeout == NVHOST_NO_TIMEOUT)
+	if (args->timeout == NVGPU_NO_TIMEOUT)
 		timeout = MAX_SCHEDULE_TIMEOUT;
 	else
 		timeout = (u32)msecs_to_jiffies(args->timeout);
 
 	switch (args->type) {
-	case NVHOST_WAIT_TYPE_NOTIFIER:
-		id = args->condition.notifier.nvmap_handle;
+	case NVGPU_WAIT_TYPE_NOTIFIER:
+		id = args->condition.notifier.dmabuf_fd;
 		offset = args->condition.notifier.offset;
 
 		dmabuf = dma_buf_get(id);
@@ -1891,9 +1889,9 @@ notif_clean_up:
 		dma_buf_vunmap(dmabuf, notif);
 		return ret;
 
-	case NVHOST_WAIT_TYPE_SEMAPHORE:
+	case NVGPU_WAIT_TYPE_SEMAPHORE:
 		ret = gk20a_channel_wait_semaphore(ch,
-				args->condition.semaphore.nvmap_handle,
+				args->condition.semaphore.dmabuf_fd,
 				args->condition.semaphore.offset,
 				args->condition.semaphore.payload,
 				timeout);
@@ -1948,7 +1946,7 @@ static void gk20a_channel_events_clear(struct channel_gk20a_poll_events *ev)
 }
 
 static int gk20a_channel_events_ctrl(struct channel_gk20a *ch,
-			  struct nvhost_channel_events_ctrl_args *args)
+			  struct nvgpu_channel_events_ctrl_args *args)
 {
 	int ret = 0;
 
@@ -1956,15 +1954,15 @@ static int gk20a_channel_events_ctrl(struct channel_gk20a *ch,
 			"channel events ctrl cmd %d", args->cmd);
 
 	switch (args->cmd) {
-	case NVHOST_IOCTL_CHANNEL_EVENTS_CTRL_CMD_ENABLE:
+	case NVGPU_IOCTL_CHANNEL_EVENTS_CTRL_CMD_ENABLE:
 		gk20a_channel_events_enable(&ch->poll_events);
 		break;
 
-	case NVHOST_IOCTL_CHANNEL_EVENTS_CTRL_CMD_DISABLE:
+	case NVGPU_IOCTL_CHANNEL_EVENTS_CTRL_CMD_DISABLE:
 		gk20a_channel_events_disable(&ch->poll_events);
 		break;
 
-	case NVHOST_IOCTL_CHANNEL_EVENTS_CTRL_CMD_CLEAR:
+	case NVGPU_IOCTL_CHANNEL_EVENTS_CTRL_CMD_CLEAR:
 		gk20a_channel_events_clear(&ch->poll_events);
 		break;
 
@@ -2027,15 +2025,15 @@ static int gk20a_channel_set_priority(struct channel_gk20a *ch,
 	u32 timeslice_timeout;
 	/* set priority of graphics channel */
 	switch (priority) {
-	case NVHOST_PRIORITY_LOW:
+	case NVGPU_PRIORITY_LOW:
 		/* 64 << 3 = 512us */
 		timeslice_timeout = 64;
 		break;
-	case NVHOST_PRIORITY_MEDIUM:
+	case NVGPU_PRIORITY_MEDIUM:
 		/* 128 << 3 = 1024us */
 		timeslice_timeout = 128;
 		break;
-	case NVHOST_PRIORITY_HIGH:
+	case NVGPU_PRIORITY_HIGH:
 		/* 255 << 3 = 2048us */
 		timeslice_timeout = 255;
 		break;
@@ -2049,7 +2047,7 @@ static int gk20a_channel_set_priority(struct channel_gk20a *ch,
 }
 
 static int gk20a_channel_zcull_bind(struct channel_gk20a *ch,
-			    struct nvhost_zcull_bind_args *args)
+			    struct nvgpu_zcull_bind_args *args)
 {
 	struct gk20a *g = ch->g;
 	struct gr_gk20a *gr = &g->gr;
@@ -2145,7 +2143,7 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g)
 
 static int gk20a_ioctl_channel_submit_gpfifo(
 	struct channel_gk20a *ch,
-	struct nvhost_submit_gpfifo_args *args)
+	struct nvgpu_submit_gpfifo_args *args)
 {
 	struct gk20a_fence *fence_out;
 	void *gpfifo;
@@ -2157,7 +2155,7 @@ static int gk20a_ioctl_channel_submit_gpfifo(
 	if (ch->has_timedout)
 		return -ETIMEDOUT;
 
-	size = args->num_entries * sizeof(struct nvhost_gpfifo);
+	size = args->num_entries * sizeof(struct nvgpu_gpfifo);
 
 	gpfifo = kzalloc(size, GFP_KERNEL);
 	if (!gpfifo)
@@ -2177,15 +2175,15 @@ static int gk20a_ioctl_channel_submit_gpfifo(
 		goto clean_up;
 
 	/* Convert fence_out to something we can pass back to user space. */
-	if (args->flags & NVHOST_SUBMIT_GPFIFO_FLAGS_FENCE_GET) {
-		if (args->flags & NVHOST_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE) {
+	if (args->flags & NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET) {
+		if (args->flags & NVGPU_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE) {
 			int fd = gk20a_fence_install_fd(fence_out);
 			if (fd < 0)
 				ret = fd;
 			else
-				args->fence.syncpt_id = fd;
+				args->fence.id = fd;
 		} else {
-			args->fence.syncpt_id = fence_out->syncpt_id;
+			args->fence.id = fence_out->syncpt_id;
 			args->fence.value = fence_out->syncpt_value;
 		}
 	}
@@ -2211,15 +2209,15 @@ long gk20a_channel_ioctl(struct file *filp,
 {
 	struct channel_gk20a *ch = filp->private_data;
 	struct platform_device *dev = ch->g->dev;
-	u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
+	u8 buf[NVGPU_IOCTL_CHANNEL_MAX_ARG_SIZE];
 	int err = 0;
 
 	gk20a_dbg_fn("start %d", _IOC_NR(cmd));
 
-	if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
+	if ((_IOC_TYPE(cmd) != NVGPU_IOCTL_MAGIC) ||
 	    (_IOC_NR(cmd) == 0) ||
-	    (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST) ||
-	    (_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE))
+	    (_IOC_NR(cmd) > NVGPU_IOCTL_CHANNEL_LAST) ||
+	    (_IOC_SIZE(cmd) > NVGPU_IOCTL_CHANNEL_MAX_ARG_SIZE))
 		return -EINVAL;
 
 	if (_IOC_DIR(cmd) & _IOC_WRITE) {
@@ -2228,7 +2226,7 @@ long gk20a_channel_ioctl(struct file *filp,
 	}
 
 	switch (cmd) {
-	case NVHOST_IOCTL_CHANNEL_OPEN:
+	case NVGPU_IOCTL_CHANNEL_OPEN:
 	{
 		int fd;
 		struct file *file;
@@ -2263,12 +2261,12 @@ long gk20a_channel_ioctl(struct file *filp,
 			break;
 		}
 
-		((struct nvhost_channel_open_args *)buf)->channel_fd = fd;
+		((struct nvgpu_channel_open_args *)buf)->channel_fd = fd;
 		break;
 	}
-	case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
+	case NVGPU_IOCTL_CHANNEL_SET_NVMAP_FD:
 		break;
-	case NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
+	case NVGPU_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
 		err = gk20a_busy(dev);
 		if (err) {
 			dev_err(&dev->dev,
@@ -2277,10 +2275,10 @@ long gk20a_channel_ioctl(struct file *filp,
 			return err;
 		}
 		err = ch->g->ops.gr.alloc_obj_ctx(ch,
-				(struct nvhost_alloc_obj_ctx_args *)buf);
+				(struct nvgpu_alloc_obj_ctx_args *)buf);
 		gk20a_idle(dev);
 		break;
-	case NVHOST_IOCTL_CHANNEL_FREE_OBJ_CTX:
+	case NVGPU_IOCTL_CHANNEL_FREE_OBJ_CTX:
 		err = gk20a_busy(dev);
 		if (err) {
 			dev_err(&dev->dev,
@@ -2289,10 +2287,10 @@ long gk20a_channel_ioctl(struct file *filp,
 			return err;
 		}
 		err = ch->g->ops.gr.free_obj_ctx(ch,
-				(struct nvhost_free_obj_ctx_args *)buf);
+				(struct nvgpu_free_obj_ctx_args *)buf);
 		gk20a_idle(dev);
 		break;
-	case NVHOST_IOCTL_CHANNEL_ALLOC_GPFIFO:
+	case NVGPU_IOCTL_CHANNEL_ALLOC_GPFIFO:
 		err = gk20a_busy(dev);
 		if (err) {
 			dev_err(&dev->dev,
@@ -2301,14 +2299,14 @@ long gk20a_channel_ioctl(struct file *filp,
 			return err;
 		}
 		err = gk20a_alloc_channel_gpfifo(ch,
-				(struct nvhost_alloc_gpfifo_args *)buf);
+				(struct nvgpu_alloc_gpfifo_args *)buf);
 		gk20a_idle(dev);
 		break;
-	case NVHOST_IOCTL_CHANNEL_SUBMIT_GPFIFO:
+	case NVGPU_IOCTL_CHANNEL_SUBMIT_GPFIFO:
 		err = gk20a_ioctl_channel_submit_gpfifo(ch,
-				(struct nvhost_submit_gpfifo_args *)buf);
+				(struct nvgpu_submit_gpfifo_args *)buf);
 		break;
-	case NVHOST_IOCTL_CHANNEL_WAIT:
+	case NVGPU_IOCTL_CHANNEL_WAIT:
 		err = gk20a_busy(dev);
 		if (err) {
 			dev_err(&dev->dev,
@@ -2317,10 +2315,10 @@ long gk20a_channel_ioctl(struct file *filp,
 			return err;
 		}
 		err = gk20a_channel_wait(ch,
-				(struct nvhost_wait_args *)buf);
+				(struct nvgpu_wait_args *)buf);
 		gk20a_idle(dev);
 		break;
-	case NVHOST_IOCTL_CHANNEL_ZCULL_BIND:
+	case NVGPU_IOCTL_CHANNEL_ZCULL_BIND:
 		err = gk20a_busy(dev);
 		if (err) {
 			dev_err(&dev->dev,
@@ -2329,10 +2327,10 @@ long gk20a_channel_ioctl(struct file *filp,
 			return err;
 		}
 		err = gk20a_channel_zcull_bind(ch,
-				(struct nvhost_zcull_bind_args *)buf);
+				(struct nvgpu_zcull_bind_args *)buf);
 		gk20a_idle(dev);
 		break;
-	case NVHOST_IOCTL_CHANNEL_SET_ERROR_NOTIFIER:
+	case NVGPU_IOCTL_CHANNEL_SET_ERROR_NOTIFIER:
 		err = gk20a_busy(dev);
 		if (err) {
 			dev_err(&dev->dev,
@@ -2341,11 +2339,11 @@ long gk20a_channel_ioctl(struct file *filp,
 			return err;
 		}
 		err = gk20a_init_error_notifier(ch,
-			(struct nvhost_set_error_notifier *)buf);
+			(struct nvgpu_set_error_notifier *)buf);
 		gk20a_idle(dev);
 		break;
 #ifdef CONFIG_GK20A_CYCLE_STATS
-	case NVHOST_IOCTL_CHANNEL_CYCLE_STATS:
+	case NVGPU_IOCTL_CHANNEL_CYCLE_STATS:
 		err = gk20a_busy(dev);
 		if (err) {
 			dev_err(&dev->dev,
@@ -2354,37 +2352,37 @@ long gk20a_channel_ioctl(struct file *filp,
 			return err;
 		}
 		err = gk20a_channel_cycle_stats(ch,
-				(struct nvhost_cycle_stats_args *)buf);
+				(struct nvgpu_cycle_stats_args *)buf);
 		gk20a_idle(dev);
 		break;
 #endif
-	case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
+	case NVGPU_IOCTL_CHANNEL_SET_TIMEOUT:
 	{
 		u32 timeout =
-			(u32)((struct nvhost_set_timeout_args *)buf)->timeout;
+			(u32)((struct nvgpu_set_timeout_args *)buf)->timeout;
 		gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
 			   timeout, ch->hw_chid);
 		ch->timeout_ms_max = timeout;
 		break;
 	}
-	case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX:
+	case NVGPU_IOCTL_CHANNEL_SET_TIMEOUT_EX:
 	{
 		u32 timeout =
-			(u32)((struct nvhost_set_timeout_args *)buf)->timeout;
+			(u32)((struct nvgpu_set_timeout_args *)buf)->timeout;
 		bool timeout_debug_dump = !((u32)
-			((struct nvhost_set_timeout_ex_args *)buf)->flags &
-			(1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP));
+			((struct nvgpu_set_timeout_ex_args *)buf)->flags &
+			(1 << NVGPU_TIMEOUT_FLAG_DISABLE_DUMP));
 		gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
 			   timeout, ch->hw_chid);
 		ch->timeout_ms_max = timeout;
 		ch->timeout_debug_dump = timeout_debug_dump;
 		break;
 	}
-	case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
-		((struct nvhost_get_param_args *)buf)->value =
+	case NVGPU_IOCTL_CHANNEL_GET_TIMEDOUT:
+		((struct nvgpu_get_param_args *)buf)->value =
 			ch->has_timedout;
 		break;
-	case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
+	case NVGPU_IOCTL_CHANNEL_SET_PRIORITY:
 		err = gk20a_busy(dev);
 		if (err) {
 			dev_err(&dev->dev,
@@ -2393,10 +2391,10 @@ long gk20a_channel_ioctl(struct file *filp,
 			return err;
 		}
 		gk20a_channel_set_priority(ch,
-			((struct nvhost_set_priority_args *)buf)->priority);
+			((struct nvgpu_set_priority_args *)buf)->priority);
 		gk20a_idle(dev);
 		break;
-	case NVHOST_IOCTL_CHANNEL_ENABLE:
+	case NVGPU_IOCTL_CHANNEL_ENABLE:
 		err = gk20a_busy(dev);
 		if (err) {
 			dev_err(&dev->dev,
@@ -2410,7 +2408,7 @@ long gk20a_channel_ioctl(struct file *filp,
 			ccsr_channel_enable_set_true_f());
 		gk20a_idle(dev);
 		break;
-	case NVHOST_IOCTL_CHANNEL_DISABLE:
+	case NVGPU_IOCTL_CHANNEL_DISABLE:
 		err = gk20a_busy(dev);
 		if (err) {
 			dev_err(&dev->dev,
@@ -2424,7 +2422,7 @@ long gk20a_channel_ioctl(struct file *filp,
 			ccsr_channel_enable_clr_true_f());
 		gk20a_idle(dev);
 		break;
-	case NVHOST_IOCTL_CHANNEL_PREEMPT:
+	case NVGPU_IOCTL_CHANNEL_PREEMPT:
 		err = gk20a_busy(dev);
 		if (err) {
 			dev_err(&dev->dev,
@@ -2435,7 +2433,7 @@ long gk20a_channel_ioctl(struct file *filp,
 		err = gk20a_fifo_preempt(ch->g, ch);
 		gk20a_idle(dev);
 		break;
-	case NVHOST_IOCTL_CHANNEL_FORCE_RESET:
+	case NVGPU_IOCTL_CHANNEL_FORCE_RESET:
 		err = gk20a_busy(dev);
 		if (err) {
 			dev_err(&dev->dev,
@@ -2446,9 +2444,9 @@ long gk20a_channel_ioctl(struct file *filp,
 		err = gk20a_fifo_force_reset_ch(ch, true);
 		gk20a_idle(dev);
 		break;
-	case NVHOST_IOCTL_CHANNEL_EVENTS_CTRL:
+	case NVGPU_IOCTL_CHANNEL_EVENTS_CTRL:
 		err = gk20a_channel_events_ctrl(ch,
-			(struct nvhost_channel_events_ctrl_args *)buf);
+			(struct nvgpu_channel_events_ctrl_args *)buf);
 		break;
 	default:
 		dev_dbg(&dev->dev, "unrecognized ioctl cmd: 0x%x", cmd);