diff options
author | David Nieto <dmartineznie@nvidia.com> | 2017-03-13 21:45:37 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-03-24 00:05:35 -0400 |
commit | 2a502bdd5f3c93b87286456ca901ad43b0f14906 (patch) | |
tree | aba5c18b07b393e0306588d9ba4707a6c272ae6b /drivers/gpu/nvgpu/gk20a/channel_gk20a.c | |
parent | a84f601fbaf6b40e14a321eda1e83d93e55cebba (diff) |
gpu: nvgpu: pass gk20a struct to gk20a_busy
After driver removal, the device structure passed to gk20a_busy can be
invalid. To solve this, the prototype of the function is modified to pass
the gk20a struct instead of the device pointer.
bug 200277762
JIRA: EVLR-1023
Change-Id: I08eb74bd3578834d45115098ed9936ebbb436fdf
Signed-off-by: David Nieto <dmartineznie@nvidia.com>
Reviewed-on: http://git-master/r/1320194
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_gk20a.c')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/channel_gk20a.c | 87 |
1 file changed, 44 insertions, 43 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c index 26fbd66e..0249a1c6 100644 --- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c | |||
@@ -1230,7 +1230,7 @@ int gk20a_channel_release(struct inode *inode, struct file *filp) | |||
1230 | 1230 | ||
1231 | int err; | 1231 | int err; |
1232 | 1232 | ||
1233 | err = gk20a_busy(g->dev); | 1233 | err = gk20a_busy(g); |
1234 | if (err) { | 1234 | if (err) { |
1235 | gk20a_err(dev_from_gk20a(g), "failed to release a channel!"); | 1235 | gk20a_err(dev_from_gk20a(g), "failed to release a channel!"); |
1236 | goto channel_release; | 1236 | goto channel_release; |
@@ -1239,7 +1239,7 @@ int gk20a_channel_release(struct inode *inode, struct file *filp) | |||
1239 | trace_gk20a_channel_release(dev_name(g->dev)); | 1239 | trace_gk20a_channel_release(dev_name(g->dev)); |
1240 | 1240 | ||
1241 | gk20a_channel_close(ch); | 1241 | gk20a_channel_close(ch); |
1242 | gk20a_idle(g->dev); | 1242 | gk20a_idle(g); |
1243 | 1243 | ||
1244 | channel_release: | 1244 | channel_release: |
1245 | gk20a_put(g); | 1245 | gk20a_put(g); |
@@ -1395,14 +1395,14 @@ static int __gk20a_channel_open(struct gk20a *g, struct file *filp, s32 runlist_ | |||
1395 | goto free_ref; | 1395 | goto free_ref; |
1396 | } | 1396 | } |
1397 | 1397 | ||
1398 | err = gk20a_busy(g->dev); | 1398 | err = gk20a_busy(g); |
1399 | if (err) { | 1399 | if (err) { |
1400 | gk20a_err(dev_from_gk20a(g), "failed to power on, %d", err); | 1400 | gk20a_err(dev_from_gk20a(g), "failed to power on, %d", err); |
1401 | goto fail_busy; | 1401 | goto fail_busy; |
1402 | } | 1402 | } |
1403 | /* All the user space channel should be non privilege */ | 1403 | /* All the user space channel should be non privilege */ |
1404 | ch = gk20a_open_new_channel(g, runlist_id, false); | 1404 | ch = gk20a_open_new_channel(g, runlist_id, false); |
1405 | gk20a_idle(g->dev); | 1405 | gk20a_idle(g); |
1406 | if (!ch) { | 1406 | if (!ch) { |
1407 | gk20a_err(dev_from_gk20a(g), | 1407 | gk20a_err(dev_from_gk20a(g), |
1408 | "failed to get f"); | 1408 | "failed to get f"); |
@@ -2693,7 +2693,7 @@ static void gk20a_channel_clean_up_jobs(struct channel_gk20a *c, | |||
2693 | 2693 | ||
2694 | channel_gk20a_free_job(c, job); | 2694 | channel_gk20a_free_job(c, job); |
2695 | job_finished = 1; | 2695 | job_finished = 1; |
2696 | gk20a_idle(g->dev); | 2696 | gk20a_idle(g); |
2697 | 2697 | ||
2698 | if (!clean_all) { | 2698 | if (!clean_all) { |
2699 | /* Timeout isn't supported here so don't touch it. */ | 2699 | /* Timeout isn't supported here so don't touch it. */ |
@@ -3120,7 +3120,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c, | |||
3120 | return -EINVAL; | 3120 | return -EINVAL; |
3121 | 3121 | ||
3122 | /* released by job cleanup via syncpt or sema interrupt */ | 3122 | /* released by job cleanup via syncpt or sema interrupt */ |
3123 | err = gk20a_busy(g->dev); | 3123 | err = gk20a_busy(g); |
3124 | if (err) { | 3124 | if (err) { |
3125 | gk20a_err(d, "failed to host gk20a to submit gpfifo, process %s", | 3125 | gk20a_err(d, "failed to host gk20a to submit gpfifo, process %s", |
3126 | current->comm); | 3126 | current->comm); |
@@ -3227,7 +3227,7 @@ clean_up: | |||
3227 | gk20a_fence_put(pre_fence); | 3227 | gk20a_fence_put(pre_fence); |
3228 | gk20a_fence_put(post_fence); | 3228 | gk20a_fence_put(post_fence); |
3229 | if (need_deferred_cleanup) | 3229 | if (need_deferred_cleanup) |
3230 | gk20a_idle(g->dev); | 3230 | gk20a_idle(g); |
3231 | return err; | 3231 | return err; |
3232 | } | 3232 | } |
3233 | 3233 | ||
@@ -3930,7 +3930,8 @@ long gk20a_channel_ioctl(struct file *filp, | |||
3930 | { | 3930 | { |
3931 | struct channel_priv *priv = filp->private_data; | 3931 | struct channel_priv *priv = filp->private_data; |
3932 | struct channel_gk20a *ch = priv->c; | 3932 | struct channel_gk20a *ch = priv->c; |
3933 | struct device *dev = ch->g->dev; | 3933 | struct gk20a *g = ch->g; |
3934 | struct device *dev = g->dev; | ||
3934 | u8 buf[NVGPU_IOCTL_CHANNEL_MAX_ARG_SIZE] = {0}; | 3935 | u8 buf[NVGPU_IOCTL_CHANNEL_MAX_ARG_SIZE] = {0}; |
3935 | int err = 0; | 3936 | int err = 0; |
3936 | 3937 | ||
@@ -3967,7 +3968,7 @@ long gk20a_channel_ioctl(struct file *filp, | |||
3967 | case NVGPU_IOCTL_CHANNEL_SET_NVMAP_FD: | 3968 | case NVGPU_IOCTL_CHANNEL_SET_NVMAP_FD: |
3968 | break; | 3969 | break; |
3969 | case NVGPU_IOCTL_CHANNEL_ALLOC_OBJ_CTX: | 3970 | case NVGPU_IOCTL_CHANNEL_ALLOC_OBJ_CTX: |
3970 | err = gk20a_busy(dev); | 3971 | err = gk20a_busy(g); |
3971 | if (err) { | 3972 | if (err) { |
3972 | dev_err(dev, | 3973 | dev_err(dev, |
3973 | "%s: failed to host gk20a for ioctl cmd: 0x%x", | 3974 | "%s: failed to host gk20a for ioctl cmd: 0x%x", |
@@ -3976,14 +3977,14 @@ long gk20a_channel_ioctl(struct file *filp, | |||
3976 | } | 3977 | } |
3977 | err = ch->g->ops.gr.alloc_obj_ctx(ch, | 3978 | err = ch->g->ops.gr.alloc_obj_ctx(ch, |
3978 | (struct nvgpu_alloc_obj_ctx_args *)buf); | 3979 | (struct nvgpu_alloc_obj_ctx_args *)buf); |
3979 | gk20a_idle(dev); | 3980 | gk20a_idle(g); |
3980 | break; | 3981 | break; |
3981 | case NVGPU_IOCTL_CHANNEL_ALLOC_GPFIFO_EX: | 3982 | case NVGPU_IOCTL_CHANNEL_ALLOC_GPFIFO_EX: |
3982 | { | 3983 | { |
3983 | struct nvgpu_alloc_gpfifo_ex_args *alloc_gpfifo_ex_args = | 3984 | struct nvgpu_alloc_gpfifo_ex_args *alloc_gpfifo_ex_args = |
3984 | (struct nvgpu_alloc_gpfifo_ex_args *)buf; | 3985 | (struct nvgpu_alloc_gpfifo_ex_args *)buf; |
3985 | 3986 | ||
3986 | err = gk20a_busy(dev); | 3987 | err = gk20a_busy(g); |
3987 | if (err) { | 3988 | if (err) { |
3988 | dev_err(dev, | 3989 | dev_err(dev, |
3989 | "%s: failed to host gk20a for ioctl cmd: 0x%x", | 3990 | "%s: failed to host gk20a for ioctl cmd: 0x%x", |
@@ -3993,11 +3994,11 @@ long gk20a_channel_ioctl(struct file *filp, | |||
3993 | 3994 | ||
3994 | if (!is_power_of_2(alloc_gpfifo_ex_args->num_entries)) { | 3995 | if (!is_power_of_2(alloc_gpfifo_ex_args->num_entries)) { |
3995 | err = -EINVAL; | 3996 | err = -EINVAL; |
3996 | gk20a_idle(dev); | 3997 | gk20a_idle(g); |
3997 | break; | 3998 | break; |
3998 | } | 3999 | } |
3999 | err = gk20a_alloc_channel_gpfifo(ch, alloc_gpfifo_ex_args); | 4000 | err = gk20a_alloc_channel_gpfifo(ch, alloc_gpfifo_ex_args); |
4000 | gk20a_idle(dev); | 4001 | gk20a_idle(g); |
4001 | break; | 4002 | break; |
4002 | } | 4003 | } |
4003 | case NVGPU_IOCTL_CHANNEL_ALLOC_GPFIFO: | 4004 | case NVGPU_IOCTL_CHANNEL_ALLOC_GPFIFO: |
@@ -4006,7 +4007,7 @@ long gk20a_channel_ioctl(struct file *filp, | |||
4006 | struct nvgpu_alloc_gpfifo_args *alloc_gpfifo_args = | 4007 | struct nvgpu_alloc_gpfifo_args *alloc_gpfifo_args = |
4007 | (struct nvgpu_alloc_gpfifo_args *)buf; | 4008 | (struct nvgpu_alloc_gpfifo_args *)buf; |
4008 | 4009 | ||
4009 | err = gk20a_busy(dev); | 4010 | err = gk20a_busy(g); |
4010 | if (err) { | 4011 | if (err) { |
4011 | dev_err(dev, | 4012 | dev_err(dev, |
4012 | "%s: failed to host gk20a for ioctl cmd: 0x%x", | 4013 | "%s: failed to host gk20a for ioctl cmd: 0x%x", |
@@ -4027,7 +4028,7 @@ long gk20a_channel_ioctl(struct file *filp, | |||
4027 | alloc_gpfifo_ex_args.flags = alloc_gpfifo_args->flags; | 4028 | alloc_gpfifo_ex_args.flags = alloc_gpfifo_args->flags; |
4028 | 4029 | ||
4029 | err = gk20a_alloc_channel_gpfifo(ch, &alloc_gpfifo_ex_args); | 4030 | err = gk20a_alloc_channel_gpfifo(ch, &alloc_gpfifo_ex_args); |
4030 | gk20a_idle(dev); | 4031 | gk20a_idle(g); |
4031 | break; | 4032 | break; |
4032 | } | 4033 | } |
4033 | case NVGPU_IOCTL_CHANNEL_SUBMIT_GPFIFO: | 4034 | case NVGPU_IOCTL_CHANNEL_SUBMIT_GPFIFO: |
@@ -4035,7 +4036,7 @@ long gk20a_channel_ioctl(struct file *filp, | |||
4035 | (struct nvgpu_submit_gpfifo_args *)buf); | 4036 | (struct nvgpu_submit_gpfifo_args *)buf); |
4036 | break; | 4037 | break; |
4037 | case NVGPU_IOCTL_CHANNEL_WAIT: | 4038 | case NVGPU_IOCTL_CHANNEL_WAIT: |
4038 | err = gk20a_busy(dev); | 4039 | err = gk20a_busy(g); |
4039 | if (err) { | 4040 | if (err) { |
4040 | dev_err(dev, | 4041 | dev_err(dev, |
4041 | "%s: failed to host gk20a for ioctl cmd: 0x%x", | 4042 | "%s: failed to host gk20a for ioctl cmd: 0x%x", |
@@ -4052,10 +4053,10 @@ long gk20a_channel_ioctl(struct file *filp, | |||
4052 | 4053 | ||
4053 | nvgpu_mutex_acquire(&ch->ioctl_lock); | 4054 | nvgpu_mutex_acquire(&ch->ioctl_lock); |
4054 | 4055 | ||
4055 | gk20a_idle(dev); | 4056 | gk20a_idle(g); |
4056 | break; | 4057 | break; |
4057 | case NVGPU_IOCTL_CHANNEL_ZCULL_BIND: | 4058 | case NVGPU_IOCTL_CHANNEL_ZCULL_BIND: |
4058 | err = gk20a_busy(dev); | 4059 | err = gk20a_busy(g); |
4059 | if (err) { | 4060 | if (err) { |
4060 | dev_err(dev, | 4061 | dev_err(dev, |
4061 | "%s: failed to host gk20a for ioctl cmd: 0x%x", | 4062 | "%s: failed to host gk20a for ioctl cmd: 0x%x", |
@@ -4064,10 +4065,10 @@ long gk20a_channel_ioctl(struct file *filp, | |||
4064 | } | 4065 | } |
4065 | err = gk20a_channel_zcull_bind(ch, | 4066 | err = gk20a_channel_zcull_bind(ch, |
4066 | (struct nvgpu_zcull_bind_args *)buf); | 4067 | (struct nvgpu_zcull_bind_args *)buf); |
4067 | gk20a_idle(dev); | 4068 | gk20a_idle(g); |
4068 | break; | 4069 | break; |
4069 | case NVGPU_IOCTL_CHANNEL_SET_ERROR_NOTIFIER: | 4070 | case NVGPU_IOCTL_CHANNEL_SET_ERROR_NOTIFIER: |
4070 | err = gk20a_busy(dev); | 4071 | err = gk20a_busy(g); |
4071 | if (err) { | 4072 | if (err) { |
4072 | dev_err(dev, | 4073 | dev_err(dev, |
4073 | "%s: failed to host gk20a for ioctl cmd: 0x%x", | 4074 | "%s: failed to host gk20a for ioctl cmd: 0x%x", |
@@ -4076,11 +4077,11 @@ long gk20a_channel_ioctl(struct file *filp, | |||
4076 | } | 4077 | } |
4077 | err = gk20a_init_error_notifier(ch, | 4078 | err = gk20a_init_error_notifier(ch, |
4078 | (struct nvgpu_set_error_notifier *)buf); | 4079 | (struct nvgpu_set_error_notifier *)buf); |
4079 | gk20a_idle(dev); | 4080 | gk20a_idle(g); |
4080 | break; | 4081 | break; |
4081 | #ifdef CONFIG_GK20A_CYCLE_STATS | 4082 | #ifdef CONFIG_GK20A_CYCLE_STATS |
4082 | case NVGPU_IOCTL_CHANNEL_CYCLE_STATS: | 4083 | case NVGPU_IOCTL_CHANNEL_CYCLE_STATS: |
4083 | err = gk20a_busy(dev); | 4084 | err = gk20a_busy(g); |
4084 | if (err) { | 4085 | if (err) { |
4085 | dev_err(dev, | 4086 | dev_err(dev, |
4086 | "%s: failed to host gk20a for ioctl cmd: 0x%x", | 4087 | "%s: failed to host gk20a for ioctl cmd: 0x%x", |
@@ -4089,7 +4090,7 @@ long gk20a_channel_ioctl(struct file *filp, | |||
4089 | } | 4090 | } |
4090 | err = gk20a_channel_cycle_stats(ch, | 4091 | err = gk20a_channel_cycle_stats(ch, |
4091 | (struct nvgpu_cycle_stats_args *)buf); | 4092 | (struct nvgpu_cycle_stats_args *)buf); |
4092 | gk20a_idle(dev); | 4093 | gk20a_idle(g); |
4093 | break; | 4094 | break; |
4094 | #endif | 4095 | #endif |
4095 | case NVGPU_IOCTL_CHANNEL_SET_TIMEOUT: | 4096 | case NVGPU_IOCTL_CHANNEL_SET_TIMEOUT: |
@@ -4123,7 +4124,7 @@ long gk20a_channel_ioctl(struct file *filp, | |||
4123 | ch->has_timedout; | 4124 | ch->has_timedout; |
4124 | break; | 4125 | break; |
4125 | case NVGPU_IOCTL_CHANNEL_SET_PRIORITY: | 4126 | case NVGPU_IOCTL_CHANNEL_SET_PRIORITY: |
4126 | err = gk20a_busy(dev); | 4127 | err = gk20a_busy(g); |
4127 | if (err) { | 4128 | if (err) { |
4128 | dev_err(dev, | 4129 | dev_err(dev, |
4129 | "%s: failed to host gk20a for ioctl cmd: 0x%x", | 4130 | "%s: failed to host gk20a for ioctl cmd: 0x%x", |
@@ -4133,12 +4134,12 @@ long gk20a_channel_ioctl(struct file *filp, | |||
4133 | err = ch->g->ops.fifo.channel_set_priority(ch, | 4134 | err = ch->g->ops.fifo.channel_set_priority(ch, |
4134 | ((struct nvgpu_set_priority_args *)buf)->priority); | 4135 | ((struct nvgpu_set_priority_args *)buf)->priority); |
4135 | 4136 | ||
4136 | gk20a_idle(dev); | 4137 | gk20a_idle(g); |
4137 | gk20a_channel_trace_sched_param( | 4138 | gk20a_channel_trace_sched_param( |
4138 | trace_gk20a_channel_set_priority, ch); | 4139 | trace_gk20a_channel_set_priority, ch); |
4139 | break; | 4140 | break; |
4140 | case NVGPU_IOCTL_CHANNEL_ENABLE: | 4141 | case NVGPU_IOCTL_CHANNEL_ENABLE: |
4141 | err = gk20a_busy(dev); | 4142 | err = gk20a_busy(g); |
4142 | if (err) { | 4143 | if (err) { |
4143 | dev_err(dev, | 4144 | dev_err(dev, |
4144 | "%s: failed to host gk20a for ioctl cmd: 0x%x", | 4145 | "%s: failed to host gk20a for ioctl cmd: 0x%x", |
@@ -4149,10 +4150,10 @@ long gk20a_channel_ioctl(struct file *filp, | |||
4149 | ch->g->ops.fifo.enable_channel(ch); | 4150 | ch->g->ops.fifo.enable_channel(ch); |
4150 | else | 4151 | else |
4151 | err = -ENOSYS; | 4152 | err = -ENOSYS; |
4152 | gk20a_idle(dev); | 4153 | gk20a_idle(g); |
4153 | break; | 4154 | break; |
4154 | case NVGPU_IOCTL_CHANNEL_DISABLE: | 4155 | case NVGPU_IOCTL_CHANNEL_DISABLE: |
4155 | err = gk20a_busy(dev); | 4156 | err = gk20a_busy(g); |
4156 | if (err) { | 4157 | if (err) { |
4157 | dev_err(dev, | 4158 | dev_err(dev, |
4158 | "%s: failed to host gk20a for ioctl cmd: 0x%x", | 4159 | "%s: failed to host gk20a for ioctl cmd: 0x%x", |
@@ -4163,10 +4164,10 @@ long gk20a_channel_ioctl(struct file *filp, | |||
4163 | ch->g->ops.fifo.disable_channel(ch); | 4164 | ch->g->ops.fifo.disable_channel(ch); |
4164 | else | 4165 | else |
4165 | err = -ENOSYS; | 4166 | err = -ENOSYS; |
4166 | gk20a_idle(dev); | 4167 | gk20a_idle(g); |
4167 | break; | 4168 | break; |
4168 | case NVGPU_IOCTL_CHANNEL_PREEMPT: | 4169 | case NVGPU_IOCTL_CHANNEL_PREEMPT: |
4169 | err = gk20a_busy(dev); | 4170 | err = gk20a_busy(g); |
4170 | if (err) { | 4171 | if (err) { |
4171 | dev_err(dev, | 4172 | dev_err(dev, |
4172 | "%s: failed to host gk20a for ioctl cmd: 0x%x", | 4173 | "%s: failed to host gk20a for ioctl cmd: 0x%x", |
@@ -4174,10 +4175,10 @@ long gk20a_channel_ioctl(struct file *filp, | |||
4174 | break; | 4175 | break; |
4175 | } | 4176 | } |
4176 | err = gk20a_fifo_preempt(ch->g, ch); | 4177 | err = gk20a_fifo_preempt(ch->g, ch); |
4177 | gk20a_idle(dev); | 4178 | gk20a_idle(g); |
4178 | break; | 4179 | break; |
4179 | case NVGPU_IOCTL_CHANNEL_FORCE_RESET: | 4180 | case NVGPU_IOCTL_CHANNEL_FORCE_RESET: |
4180 | err = gk20a_busy(dev); | 4181 | err = gk20a_busy(g); |
4181 | if (err) { | 4182 | if (err) { |
4182 | dev_err(dev, | 4183 | dev_err(dev, |
4183 | "%s: failed to host gk20a for ioctl cmd: 0x%x", | 4184 | "%s: failed to host gk20a for ioctl cmd: 0x%x", |
@@ -4186,7 +4187,7 @@ long gk20a_channel_ioctl(struct file *filp, | |||
4186 | } | 4187 | } |
4187 | err = ch->g->ops.fifo.force_reset_ch(ch, | 4188 | err = ch->g->ops.fifo.force_reset_ch(ch, |
4188 | NVGPU_CHANNEL_RESETCHANNEL_VERIF_ERROR, true); | 4189 | NVGPU_CHANNEL_RESETCHANNEL_VERIF_ERROR, true); |
4189 | gk20a_idle(dev); | 4190 | gk20a_idle(g); |
4190 | break; | 4191 | break; |
4191 | case NVGPU_IOCTL_CHANNEL_EVENT_ID_CTRL: | 4192 | case NVGPU_IOCTL_CHANNEL_EVENT_ID_CTRL: |
4192 | err = gk20a_channel_event_id_ctrl(ch, | 4193 | err = gk20a_channel_event_id_ctrl(ch, |
@@ -4194,7 +4195,7 @@ long gk20a_channel_ioctl(struct file *filp, | |||
4194 | break; | 4195 | break; |
4195 | #ifdef CONFIG_GK20A_CYCLE_STATS | 4196 | #ifdef CONFIG_GK20A_CYCLE_STATS |
4196 | case NVGPU_IOCTL_CHANNEL_CYCLE_STATS_SNAPSHOT: | 4197 | case NVGPU_IOCTL_CHANNEL_CYCLE_STATS_SNAPSHOT: |
4197 | err = gk20a_busy(dev); | 4198 | err = gk20a_busy(g); |
4198 | if (err) { | 4199 | if (err) { |
4199 | dev_err(dev, | 4200 | dev_err(dev, |
4200 | "%s: failed to host gk20a for ioctl cmd: 0x%x", | 4201 | "%s: failed to host gk20a for ioctl cmd: 0x%x", |
@@ -4203,7 +4204,7 @@ long gk20a_channel_ioctl(struct file *filp, | |||
4203 | } | 4204 | } |
4204 | err = gk20a_channel_cycle_stats_snapshot(ch, | 4205 | err = gk20a_channel_cycle_stats_snapshot(ch, |
4205 | (struct nvgpu_cycle_stats_snapshot_args *)buf); | 4206 | (struct nvgpu_cycle_stats_snapshot_args *)buf); |
4206 | gk20a_idle(dev); | 4207 | gk20a_idle(g); |
4207 | break; | 4208 | break; |
4208 | #endif | 4209 | #endif |
4209 | case NVGPU_IOCTL_CHANNEL_WDT: | 4210 | case NVGPU_IOCTL_CHANNEL_WDT: |
@@ -4211,7 +4212,7 @@ long gk20a_channel_ioctl(struct file *filp, | |||
4211 | (struct nvgpu_channel_wdt_args *)buf); | 4212 | (struct nvgpu_channel_wdt_args *)buf); |
4212 | break; | 4213 | break; |
4213 | case NVGPU_IOCTL_CHANNEL_SET_RUNLIST_INTERLEAVE: | 4214 | case NVGPU_IOCTL_CHANNEL_SET_RUNLIST_INTERLEAVE: |
4214 | err = gk20a_busy(dev); | 4215 | err = gk20a_busy(g); |
4215 | if (err) { | 4216 | if (err) { |
4216 | dev_err(dev, | 4217 | dev_err(dev, |
4217 | "%s: failed to host gk20a for ioctl cmd: 0x%x", | 4218 | "%s: failed to host gk20a for ioctl cmd: 0x%x", |
@@ -4221,12 +4222,12 @@ long gk20a_channel_ioctl(struct file *filp, | |||
4221 | err = gk20a_channel_set_runlist_interleave(ch, | 4222 | err = gk20a_channel_set_runlist_interleave(ch, |
4222 | ((struct nvgpu_runlist_interleave_args *)buf)->level); | 4223 | ((struct nvgpu_runlist_interleave_args *)buf)->level); |
4223 | 4224 | ||
4224 | gk20a_idle(dev); | 4225 | gk20a_idle(g); |
4225 | gk20a_channel_trace_sched_param( | 4226 | gk20a_channel_trace_sched_param( |
4226 | trace_gk20a_channel_set_runlist_interleave, ch); | 4227 | trace_gk20a_channel_set_runlist_interleave, ch); |
4227 | break; | 4228 | break; |
4228 | case NVGPU_IOCTL_CHANNEL_SET_TIMESLICE: | 4229 | case NVGPU_IOCTL_CHANNEL_SET_TIMESLICE: |
4229 | err = gk20a_busy(dev); | 4230 | err = gk20a_busy(g); |
4230 | if (err) { | 4231 | if (err) { |
4231 | dev_err(dev, | 4232 | dev_err(dev, |
4232 | "%s: failed to host gk20a for ioctl cmd: 0x%x", | 4233 | "%s: failed to host gk20a for ioctl cmd: 0x%x", |
@@ -4236,13 +4237,13 @@ long gk20a_channel_ioctl(struct file *filp, | |||
4236 | err = ch->g->ops.fifo.channel_set_timeslice(ch, | 4237 | err = ch->g->ops.fifo.channel_set_timeslice(ch, |
4237 | ((struct nvgpu_timeslice_args *)buf)->timeslice_us); | 4238 | ((struct nvgpu_timeslice_args *)buf)->timeslice_us); |
4238 | 4239 | ||
4239 | gk20a_idle(dev); | 4240 | gk20a_idle(g); |
4240 | gk20a_channel_trace_sched_param( | 4241 | gk20a_channel_trace_sched_param( |
4241 | trace_gk20a_channel_set_timeslice, ch); | 4242 | trace_gk20a_channel_set_timeslice, ch); |
4242 | break; | 4243 | break; |
4243 | case NVGPU_IOCTL_CHANNEL_SET_PREEMPTION_MODE: | 4244 | case NVGPU_IOCTL_CHANNEL_SET_PREEMPTION_MODE: |
4244 | if (ch->g->ops.gr.set_preemption_mode) { | 4245 | if (ch->g->ops.gr.set_preemption_mode) { |
4245 | err = gk20a_busy(dev); | 4246 | err = gk20a_busy(g); |
4246 | if (err) { | 4247 | if (err) { |
4247 | dev_err(dev, | 4248 | dev_err(dev, |
4248 | "%s: failed to host gk20a for ioctl cmd: 0x%x", | 4249 | "%s: failed to host gk20a for ioctl cmd: 0x%x", |
@@ -4252,7 +4253,7 @@ long gk20a_channel_ioctl(struct file *filp, | |||
4252 | err = ch->g->ops.gr.set_preemption_mode(ch, | 4253 | err = ch->g->ops.gr.set_preemption_mode(ch, |
4253 | ((struct nvgpu_preemption_mode_args *)buf)->graphics_preempt_mode, | 4254 | ((struct nvgpu_preemption_mode_args *)buf)->graphics_preempt_mode, |
4254 | ((struct nvgpu_preemption_mode_args *)buf)->compute_preempt_mode); | 4255 | ((struct nvgpu_preemption_mode_args *)buf)->compute_preempt_mode); |
4255 | gk20a_idle(dev); | 4256 | gk20a_idle(g); |
4256 | } else { | 4257 | } else { |
4257 | err = -EINVAL; | 4258 | err = -EINVAL; |
4258 | } | 4259 | } |
@@ -4262,7 +4263,7 @@ long gk20a_channel_ioctl(struct file *filp, | |||
4262 | bool boost = | 4263 | bool boost = |
4263 | ((struct nvgpu_boosted_ctx_args *)buf)->boost; | 4264 | ((struct nvgpu_boosted_ctx_args *)buf)->boost; |
4264 | 4265 | ||
4265 | err = gk20a_busy(dev); | 4266 | err = gk20a_busy(g); |
4266 | if (err) { | 4267 | if (err) { |
4267 | dev_err(dev, | 4268 | dev_err(dev, |
4268 | "%s: failed to host gk20a for ioctl cmd: 0x%x", | 4269 | "%s: failed to host gk20a for ioctl cmd: 0x%x", |
@@ -4270,7 +4271,7 @@ long gk20a_channel_ioctl(struct file *filp, | |||
4270 | break; | 4271 | break; |
4271 | } | 4272 | } |
4272 | err = ch->g->ops.gr.set_boosted_ctx(ch, boost); | 4273 | err = ch->g->ops.gr.set_boosted_ctx(ch, boost); |
4273 | gk20a_idle(dev); | 4274 | gk20a_idle(g); |
4274 | } else { | 4275 | } else { |
4275 | err = -EINVAL; | 4276 | err = -EINVAL; |
4276 | } | 4277 | } |