diff options
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c | 76
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c    | 16
-rw-r--r--  drivers/gpu/nvgpu/gk20a/tsg_gk20a.c     | 14
3 files changed, 90 insertions, 16 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index f58b208c..8a9729ab 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -3225,6 +3225,8 @@ clean_up: | |||
3225 | int gk20a_init_channel_support(struct gk20a *g, u32 chid) | 3225 | int gk20a_init_channel_support(struct gk20a *g, u32 chid) |
3226 | { | 3226 | { |
3227 | struct channel_gk20a *c = g->fifo.channel+chid; | 3227 | struct channel_gk20a *c = g->fifo.channel+chid; |
3228 | int err; | ||
3229 | |||
3228 | c->g = NULL; | 3230 | c->g = NULL; |
3229 | c->hw_chid = chid; | 3231 | c->hw_chid = chid; |
3230 | atomic_set(&c->bound, false); | 3232 | atomic_set(&c->bound, false); |
@@ -3232,31 +3234,72 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid) | |||
3232 | atomic_set(&c->ref_count, 0); | 3234 | atomic_set(&c->ref_count, 0); |
3233 | c->referenceable = false; | 3235 | c->referenceable = false; |
3234 | init_waitqueue_head(&c->ref_count_dec_wq); | 3236 | init_waitqueue_head(&c->ref_count_dec_wq); |
3237 | |||
3235 | #if GK20A_CHANNEL_REFCOUNT_TRACKING | 3238 | #if GK20A_CHANNEL_REFCOUNT_TRACKING |
3236 | nvgpu_spinlock_init(&c->ref_actions_lock); | 3239 | nvgpu_spinlock_init(&c->ref_actions_lock); |
3237 | #endif | 3240 | #endif |
3238 | nvgpu_mutex_init(&c->ioctl_lock); | ||
3239 | nvgpu_mutex_init(&c->error_notifier_mutex); | ||
3240 | nvgpu_mutex_init(&c->joblist.cleanup_lock); | ||
3241 | nvgpu_spinlock_init(&c->joblist.dynamic.lock); | 3241 | nvgpu_spinlock_init(&c->joblist.dynamic.lock); |
3242 | nvgpu_mutex_init(&c->joblist.pre_alloc.read_lock); | ||
3243 | nvgpu_raw_spinlock_init(&c->timeout.lock); | 3242 | nvgpu_raw_spinlock_init(&c->timeout.lock); |
3244 | nvgpu_mutex_init(&c->sync_lock); | ||
3245 | 3243 | ||
3246 | INIT_LIST_HEAD(&c->joblist.dynamic.jobs); | 3244 | INIT_LIST_HEAD(&c->joblist.dynamic.jobs); |
3247 | #if defined(CONFIG_GK20A_CYCLE_STATS) | ||
3248 | nvgpu_mutex_init(&c->cyclestate.cyclestate_buffer_mutex); | ||
3249 | nvgpu_mutex_init(&c->cs_client_mutex); | ||
3250 | #endif | ||
3251 | INIT_LIST_HEAD(&c->dbg_s_list); | 3245 | INIT_LIST_HEAD(&c->dbg_s_list); |
3252 | INIT_LIST_HEAD(&c->event_id_list); | 3246 | INIT_LIST_HEAD(&c->event_id_list); |
3253 | nvgpu_mutex_init(&c->event_id_list_lock); | ||
3254 | nvgpu_mutex_init(&c->dbg_s_lock); | ||
3255 | list_add(&c->free_chs, &g->fifo.free_chs); | ||
3256 | |||
3257 | INIT_LIST_HEAD(&c->worker_item); | 3247 | INIT_LIST_HEAD(&c->worker_item); |
3258 | 3248 | ||
3249 | err = nvgpu_mutex_init(&c->ioctl_lock); | ||
3250 | if (err) | ||
3251 | return err; | ||
3252 | err = nvgpu_mutex_init(&c->error_notifier_mutex); | ||
3253 | if (err) | ||
3254 | goto fail_1; | ||
3255 | err = nvgpu_mutex_init(&c->joblist.cleanup_lock); | ||
3256 | if (err) | ||
3257 | goto fail_2; | ||
3258 | err = nvgpu_mutex_init(&c->joblist.pre_alloc.read_lock); | ||
3259 | if (err) | ||
3260 | goto fail_3; | ||
3261 | err = nvgpu_mutex_init(&c->sync_lock); | ||
3262 | if (err) | ||
3263 | goto fail_4; | ||
3264 | #if defined(CONFIG_GK20A_CYCLE_STATS) | ||
3265 | err = nvgpu_mutex_init(&c->cyclestate.cyclestate_buffer_mutex); | ||
3266 | if (err) | ||
3267 | goto fail_5; | ||
3268 | err = nvgpu_mutex_init(&c->cs_client_mutex); | ||
3269 | if (err) | ||
3270 | goto fail_6; | ||
3271 | #endif | ||
3272 | err = nvgpu_mutex_init(&c->event_id_list_lock); | ||
3273 | if (err) | ||
3274 | goto fail_7; | ||
3275 | err = nvgpu_mutex_init(&c->dbg_s_lock); | ||
3276 | if (err) | ||
3277 | goto fail_8; | ||
3278 | |||
3279 | list_add(&c->free_chs, &g->fifo.free_chs); | ||
3280 | |||
3259 | return 0; | 3281 | return 0; |
3282 | |||
3283 | fail_8: | ||
3284 | nvgpu_mutex_destroy(&c->event_id_list_lock); | ||
3285 | fail_7: | ||
3286 | #if defined(CONFIG_GK20A_CYCLE_STATS) | ||
3287 | nvgpu_mutex_destroy(&c->cs_client_mutex); | ||
3288 | fail_6: | ||
3289 | nvgpu_mutex_destroy(&c->cyclestate.cyclestate_buffer_mutex); | ||
3290 | fail_5: | ||
3291 | #endif | ||
3292 | nvgpu_mutex_destroy(&c->sync_lock); | ||
3293 | fail_4: | ||
3294 | nvgpu_mutex_destroy(&c->joblist.pre_alloc.read_lock); | ||
3295 | fail_3: | ||
3296 | nvgpu_mutex_destroy(&c->joblist.cleanup_lock); | ||
3297 | fail_2: | ||
3298 | nvgpu_mutex_destroy(&c->error_notifier_mutex); | ||
3299 | fail_1: | ||
3300 | nvgpu_mutex_destroy(&c->ioctl_lock); | ||
3301 | |||
3302 | return err; | ||
3260 | } | 3303 | } |
3261 | 3304 | ||
3262 | static int gk20a_channel_wait_semaphore(struct channel_gk20a *ch, | 3305 | static int gk20a_channel_wait_semaphore(struct channel_gk20a *ch, |
@@ -3460,6 +3503,7 @@ static int gk20a_event_id_release(struct inode *inode, struct file *filp) | |||
3460 | nvgpu_mutex_release(&ch->event_id_list_lock); | 3503 | nvgpu_mutex_release(&ch->event_id_list_lock); |
3461 | } | 3504 | } |
3462 | 3505 | ||
3506 | nvgpu_mutex_destroy(&event_id_data->lock); | ||
3463 | kfree(event_id_data); | 3507 | kfree(event_id_data); |
3464 | filp->private_data = NULL; | 3508 | filp->private_data = NULL; |
3465 | 3509 | ||
@@ -3562,7 +3606,9 @@ static int gk20a_channel_event_id_enable(struct channel_gk20a *ch, | |||
3562 | event_id_data->event_id = event_id; | 3606 | event_id_data->event_id = event_id; |
3563 | 3607 | ||
3564 | init_waitqueue_head(&event_id_data->event_id_wq); | 3608 | init_waitqueue_head(&event_id_data->event_id_wq); |
3565 | nvgpu_mutex_init(&event_id_data->lock); | 3609 | err = nvgpu_mutex_init(&event_id_data->lock); |
3610 | if (err) | ||
3611 | goto clean_up_free; | ||
3566 | INIT_LIST_HEAD(&event_id_data->event_id_node); | 3612 | INIT_LIST_HEAD(&event_id_data->event_id_node); |
3567 | 3613 | ||
3568 | nvgpu_mutex_acquire(&ch->event_id_list_lock); | 3614 | nvgpu_mutex_acquire(&ch->event_id_list_lock); |
@@ -3576,6 +3622,8 @@ static int gk20a_channel_event_id_enable(struct channel_gk20a *ch, | |||
3576 | 3622 | ||
3577 | return 0; | 3623 | return 0; |
3578 | 3624 | ||
3625 | clean_up_free: | ||
3626 | kfree(event_id_data); | ||
3579 | clean_up_file: | 3627 | clean_up_file: |
3580 | fput(file); | 3628 | fput(file); |
3581 | clean_up: | 3629 | clean_up: |
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 04d68872..a2846d2a 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -513,6 +513,7 @@ static void gk20a_remove_fifo_support(struct fifo_gk20a *f) | |||
513 | */ | 513 | */ |
514 | for (; i < f->num_channels; i++) { | 514 | for (; i < f->num_channels; i++) { |
515 | struct channel_gk20a *c = f->channel + i; | 515 | struct channel_gk20a *c = f->channel + i; |
516 | struct tsg_gk20a *tsg = f->tsg + i; | ||
516 | 517 | ||
517 | /* | 518 | /* |
518 | * Could race but worst that happens is we get an error message | 519 | * Could race but worst that happens is we get an error message |
@@ -520,6 +521,21 @@ static void gk20a_remove_fifo_support(struct fifo_gk20a *f) | |||
520 | */ | 521 | */ |
521 | if (c->referenceable) | 522 | if (c->referenceable) |
522 | __gk20a_channel_kill(c); | 523 | __gk20a_channel_kill(c); |
524 | |||
525 | nvgpu_mutex_destroy(&tsg->event_id_list_lock); | ||
526 | |||
527 | nvgpu_mutex_destroy(&c->ioctl_lock); | ||
528 | nvgpu_mutex_destroy(&c->error_notifier_mutex); | ||
529 | nvgpu_mutex_destroy(&c->joblist.cleanup_lock); | ||
530 | nvgpu_mutex_destroy(&c->joblist.pre_alloc.read_lock); | ||
531 | nvgpu_mutex_destroy(&c->sync_lock); | ||
532 | #if defined(CONFIG_GK20A_CYCLE_STATS) | ||
533 | nvgpu_mutex_destroy(&c->cyclestate.cyclestate_buffer_mutex); | ||
534 | nvgpu_mutex_destroy(&c->cs_client_mutex); | ||
535 | #endif | ||
536 | nvgpu_mutex_destroy(&c->event_id_list_lock); | ||
537 | nvgpu_mutex_destroy(&c->dbg_s_lock); | ||
538 | |||
523 | } | 539 | } |
524 | 540 | ||
525 | vfree(f->channel); | 541 | vfree(f->channel); |
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index aadf5463..e1424f2b 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -156,6 +156,7 @@ int gk20a_tsg_unbind_channel(struct channel_gk20a *ch) | |||
156 | int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid) | 156 | int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid) |
157 | { | 157 | { |
158 | struct tsg_gk20a *tsg = NULL; | 158 | struct tsg_gk20a *tsg = NULL; |
159 | int err; | ||
159 | 160 | ||
160 | if (tsgid >= g->fifo.num_channels) | 161 | if (tsgid >= g->fifo.num_channels) |
161 | return -EINVAL; | 162 | return -EINVAL; |
@@ -169,7 +170,11 @@ int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid) | |||
169 | init_rwsem(&tsg->ch_list_lock); | 170 | init_rwsem(&tsg->ch_list_lock); |
170 | 171 | ||
171 | INIT_LIST_HEAD(&tsg->event_id_list); | 172 | INIT_LIST_HEAD(&tsg->event_id_list); |
172 | nvgpu_mutex_init(&tsg->event_id_list_lock); | 173 | err = nvgpu_mutex_init(&tsg->event_id_list_lock); |
174 | if (err) { | ||
175 | tsg->in_use = true; /* make this TSG unusable */ | ||
176 | return err; | ||
177 | } | ||
173 | 178 | ||
174 | return 0; | 179 | return 0; |
175 | } | 180 | } |
@@ -287,7 +292,10 @@ static int gk20a_tsg_event_id_enable(struct tsg_gk20a *tsg, | |||
287 | event_id_data->event_id = event_id; | 292 | event_id_data->event_id = event_id; |
288 | 293 | ||
289 | init_waitqueue_head(&event_id_data->event_id_wq); | 294 | init_waitqueue_head(&event_id_data->event_id_wq); |
290 | nvgpu_mutex_init(&event_id_data->lock); | 295 | err = nvgpu_mutex_init(&event_id_data->lock); |
296 | if (err) | ||
297 | goto clean_up_free; | ||
298 | |||
291 | INIT_LIST_HEAD(&event_id_data->event_id_node); | 299 | INIT_LIST_HEAD(&event_id_data->event_id_node); |
292 | 300 | ||
293 | nvgpu_mutex_acquire(&tsg->event_id_list_lock); | 301 | nvgpu_mutex_acquire(&tsg->event_id_list_lock); |
@@ -301,6 +309,8 @@ static int gk20a_tsg_event_id_enable(struct tsg_gk20a *tsg, | |||
301 | 309 | ||
302 | return 0; | 310 | return 0; |
303 | 311 | ||
312 | clean_up_free: | ||
313 | kfree(event_id_data); | ||
304 | clean_up_file: | 314 | clean_up_file: |
305 | fput(file); | 315 | fput(file); |
306 | clean_up: | 316 | clean_up: |