Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
 35 files changed, 654 insertions(+), 651 deletions(-)
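The changes below swap bare Linux locking primitives for nvgpu's OS-abstraction wrappers (struct mutex -> struct nvgpu_mutex, mutex_lock -> nvgpu_mutex_acquire, and so on). For orientation, a minimal sketch of what the Linux backing of these wrappers could look like, matching the call names used in the diff; the struct layout and return types here are assumptions for illustration, not the actual nvgpu header:

/*
 * Sketch of nvgpu mutex wrappers over the Linux mutex, assuming a
 * Linux-only build; the real nvgpu abstraction also targets other OSes.
 */
#include <linux/mutex.h>

struct nvgpu_mutex {
	struct mutex mutex;	/* wrapped Linux mutex */
};

static inline void nvgpu_mutex_init(struct nvgpu_mutex *mutex)
{
	mutex_init(&mutex->mutex);
}

static inline void nvgpu_mutex_acquire(struct nvgpu_mutex *mutex)
{
	mutex_lock(&mutex->mutex);
}

static inline void nvgpu_mutex_release(struct nvgpu_mutex *mutex)
{
	mutex_unlock(&mutex->mutex);
}

/* Returns nonzero when the lock was taken, like mutex_trylock(). */
static inline int nvgpu_mutex_tryacquire(struct nvgpu_mutex *mutex)
{
	return mutex_trylock(&mutex->mutex);
}

static inline void nvgpu_mutex_destroy(struct nvgpu_mutex *mutex)
{
	mutex_destroy(&mutex->mutex);
}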
diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
index 2a9ad40d..d43bc93f 100644
--- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
@@ -101,9 +101,9 @@ __acquires(&cde_app->mutex)
 		return;
 
 	if (wait_finish) {
-		mutex_unlock(&cde_app->mutex);
+		nvgpu_mutex_release(&cde_app->mutex);
 		cancel_delayed_work_sync(&cde_ctx->ctx_deleter_work);
-		mutex_lock(&cde_app->mutex);
+		nvgpu_mutex_acquire(&cde_app->mutex);
 	} else {
 		cancel_delayed_work(&cde_ctx->ctx_deleter_work);
 	}
@@ -152,9 +152,9 @@ __releases(&cde_app->mutex)
 	if (!cde_app->initialised)
 		return;
 
-	mutex_lock(&cde_app->mutex);
+	nvgpu_mutex_acquire(&cde_app->mutex);
 	gk20a_cde_stop(g);
-	mutex_unlock(&cde_app->mutex);
+	nvgpu_mutex_release(&cde_app->mutex);
 }
 
 void gk20a_cde_suspend(struct gk20a *g)
@@ -167,7 +167,7 @@ __releases(&cde_app->mutex)
 	if (!cde_app->initialised)
 		return;
 
-	mutex_lock(&cde_app->mutex);
+	nvgpu_mutex_acquire(&cde_app->mutex);
 
 	list_for_each_entry_safe(cde_ctx, cde_ctx_save,
 			&cde_app->free_contexts, list) {
@@ -179,7 +179,7 @@ __releases(&cde_app->mutex)
 		gk20a_cde_cancel_deleter(cde_ctx, false);
 	}
 
-	mutex_unlock(&cde_app->mutex);
+	nvgpu_mutex_release(&cde_app->mutex);
 
 }
 
@@ -739,7 +739,7 @@ __releases(&cde_app->mutex)
 	gk20a_dbg(gpu_dbg_cde_ctx, "releasing use on %p", cde_ctx);
 	trace_gk20a_cde_release(cde_ctx);
 
-	mutex_lock(&cde_app->mutex);
+	nvgpu_mutex_acquire(&cde_app->mutex);
 
 	if (cde_ctx->in_use) {
 		cde_ctx->in_use = false;
@@ -749,7 +749,7 @@ __releases(&cde_app->mutex)
 		gk20a_dbg_info("double release cde context %p", cde_ctx);
 	}
 
-	mutex_unlock(&cde_app->mutex);
+	nvgpu_mutex_release(&cde_app->mutex);
 }
 
 static void gk20a_cde_ctx_deleter_fn(struct work_struct *work)
@@ -779,7 +779,7 @@ __releases(&cde_app->mutex)
 		return;
 	}
 
-	mutex_lock(&cde_app->mutex);
+	nvgpu_mutex_acquire(&cde_app->mutex);
 	if (cde_ctx->in_use || !cde_app->initialised) {
 		gk20a_dbg(gpu_dbg_cde_ctx,
 			"cde: context use raced, not deleting %p",
@@ -797,7 +797,7 @@ __releases(&cde_app->mutex)
 		cde_app->ctx_count_top);
 
 out:
-	mutex_unlock(&cde_app->mutex);
+	nvgpu_mutex_release(&cde_app->mutex);
 	gk20a_idle(dev);
 }
 
@@ -876,9 +876,9 @@ __acquires(&cde_app->mutex)
 			break;
 
 		/* exhausted, retry */
-		mutex_unlock(&cde_app->mutex);
+		nvgpu_mutex_release(&cde_app->mutex);
 		cond_resched();
-		mutex_lock(&cde_app->mutex);
+		nvgpu_mutex_acquire(&cde_app->mutex);
 	} while (!nvgpu_timeout_expired(&timeout));
 
 	return cde_ctx;
@@ -946,7 +946,7 @@ __releases(&cde_app->mutex)
 	    scatterbuffer_byte_offset < compbits_byte_offset)
 		return -EINVAL;
 
-	mutex_lock(&g->cde_app.mutex);
+	nvgpu_mutex_acquire(&g->cde_app.mutex);
 
 	cde_ctx = gk20a_cde_get_context(g);
 	if (IS_ERR(cde_ctx)) {
@@ -1118,7 +1118,7 @@ exit_unlock:
 	if (surface)
 		dma_buf_vunmap(compbits_scatter_buf, surface);
 
-	mutex_unlock(&g->cde_app.mutex);
+	nvgpu_mutex_release(&g->cde_app.mutex);
 	return err;
 }
 
@@ -1155,13 +1155,13 @@ __releases(&cde_app->mutex)
 			"cde: channel had timed out"
 			", reloading");
 		/* mark it to be deleted, replace with a new one */
-		mutex_lock(&cde_app->mutex);
+		nvgpu_mutex_acquire(&cde_app->mutex);
 		cde_ctx->is_temporary = true;
 		if (gk20a_cde_create_context(g)) {
 			gk20a_err(cde_ctx->dev,
 				"cde: can't replace context");
 		}
-		mutex_unlock(&cde_app->mutex);
+		nvgpu_mutex_release(&cde_app->mutex);
 	}
 }
 
@@ -1274,7 +1274,7 @@ __releases(&cde_app->mutex)
 	if (err)
 		return err;
 
-	mutex_lock(&cde_app->mutex);
+	nvgpu_mutex_acquire(&cde_app->mutex);
 
 	gk20a_cde_stop(g);
 
@@ -1282,7 +1282,7 @@ __releases(&cde_app->mutex)
 	if (!err)
 		cde_app->initialised = true;
 
-	mutex_unlock(&cde_app->mutex);
+	nvgpu_mutex_release(&cde_app->mutex);
 
 	gk20a_idle(g->dev);
 	return err;
@@ -1300,8 +1300,8 @@ __releases(&cde_app->mutex)
 
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: init");
 
-	mutex_init(&cde_app->mutex);
-	mutex_lock(&cde_app->mutex);
+	nvgpu_mutex_init(&cde_app->mutex);
+	nvgpu_mutex_acquire(&cde_app->mutex);
 
 	INIT_LIST_HEAD(&cde_app->free_contexts);
 	INIT_LIST_HEAD(&cde_app->used_contexts);
@@ -1313,7 +1313,7 @@ __releases(&cde_app->mutex)
 	if (!err)
 		cde_app->initialised = true;
 
-	mutex_unlock(&cde_app->mutex);
+	nvgpu_mutex_release(&cde_app->mutex);
 	gk20a_dbg(gpu_dbg_cde_ctx, "cde: init finished: %d", err);
 	return err;
 }
@@ -1561,7 +1561,7 @@ int gk20a_prepare_compressible_read(
 
 	missing_bits = (state->valid_compbits ^ request) & request;
 
-	mutex_lock(&state->lock);
+	nvgpu_mutex_acquire(&state->lock);
 
 	if (state->valid_compbits && request == NVGPU_GPU_COMPBITS_NONE) {
 
@@ -1599,7 +1599,7 @@ int gk20a_prepare_compressible_read(
 		*zbc_color = state->zbc_color;
 
 out:
-	mutex_unlock(&state->lock);
+	nvgpu_mutex_release(&state->lock);
 	dma_buf_put(dmabuf);
 	return err;
 }
@@ -1624,7 +1624,7 @@ int gk20a_mark_compressible_write(struct gk20a *g, u32 buffer_fd,
 		return err;
 	}
 
-	mutex_lock(&state->lock);
+	nvgpu_mutex_acquire(&state->lock);
 
 	/* Update the compbits state. */
 	state->valid_compbits = valid_compbits;
@@ -1634,7 +1634,7 @@ int gk20a_mark_compressible_write(struct gk20a *g, u32 buffer_fd,
 		gk20a_fence_put(state->fence);
 	state->fence = NULL;
 
-	mutex_unlock(&state->lock);
+	nvgpu_mutex_release(&state->lock);
 	dma_buf_put(dmabuf);
 	return 0;
 }
diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.h b/drivers/gpu/nvgpu/gk20a/cde_gk20a.h
index 8cdba938..1136b0ad 100644
--- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.h
@@ -1,7 +1,7 @@
 /*
  * GK20A color decompression engine support
  *
- * Copyright (c) 2014-2016, NVIDIA Corporation. All rights reserved.
+ * Copyright (c) 2014-2017, NVIDIA Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -262,7 +262,7 @@ struct gk20a_cde_ctx {
 
 struct gk20a_cde_app {
 	bool initialised;
-	struct mutex mutex;
+	struct nvgpu_mutex mutex;
 
 	struct list_head free_contexts;
 	struct list_head used_contexts;
diff --git a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
index 023c959e..fd248313 100644
--- a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
@@ -107,7 +107,7 @@ static void gk20a_ce_notify_all_user(struct gk20a *g, u32 event)
 	if (!ce_app->initialised)
 		return;
 
-	mutex_lock(&ce_app->app_mutex);
+	nvgpu_mutex_acquire(&ce_app->app_mutex);
 
 	list_for_each_entry_safe(ce_ctx, ce_ctx_save,
 			&ce_app->allocated_contexts, list) {
@@ -117,7 +117,7 @@ static void gk20a_ce_notify_all_user(struct gk20a *g, u32 event)
 		}
 	}
 
-	mutex_unlock(&ce_app->app_mutex);
+	nvgpu_mutex_release(&ce_app->app_mutex);
 }
 
 static void gk20a_ce_finished_ctx_cb(struct channel_gk20a *ch, void *data)
@@ -183,14 +183,14 @@ static void gk20a_ce_free_command_buffer_stored_fence(struct gk20a_gpu_ctx *ce_c
 	}
 }
 
-/* assume this api should need to call under mutex_lock(&ce_app->app_mutex) */
+/* assume this api should need to call under nvgpu_mutex_acquire(&ce_app->app_mutex) */
 static void gk20a_ce_delete_gpu_context(struct gk20a_gpu_ctx *ce_ctx)
 {
 	struct list_head *list = &ce_ctx->list;
 
 	ce_ctx->gpu_ctx_state = NVGPU_CE_GPU_CTX_DELETED;
 
-	mutex_lock(&ce_ctx->gpu_ctx_mutex);
+	nvgpu_mutex_acquire(&ce_ctx->gpu_ctx_mutex);
 
 	if (ce_ctx->cmd_buf_mem.cpu_va) {
 		gk20a_ce_free_command_buffer_stored_fence(ce_ctx);
@@ -205,8 +205,8 @@ static void gk20a_ce_delete_gpu_context(struct gk20a_gpu_ctx *ce_ctx)
 	if (list->prev && list->next)
 		list_del(list);
 
-	mutex_unlock(&ce_ctx->gpu_ctx_mutex);
-	mutex_destroy(&ce_ctx->gpu_ctx_mutex);
+	nvgpu_mutex_release(&ce_ctx->gpu_ctx_mutex);
+	nvgpu_mutex_destroy(&ce_ctx->gpu_ctx_mutex);
 
 	kfree(ce_ctx);
 }
@@ -353,8 +353,8 @@ int gk20a_init_ce_support(struct gk20a *g)
 
 	gk20a_dbg(gpu_dbg_fn, "ce: init");
 
-	mutex_init(&ce_app->app_mutex);
-	mutex_lock(&ce_app->app_mutex);
+	nvgpu_mutex_init(&ce_app->app_mutex);
+	nvgpu_mutex_acquire(&ce_app->app_mutex);
 
 	INIT_LIST_HEAD(&ce_app->allocated_contexts);
 	ce_app->ctx_count = 0;
@@ -362,7 +362,7 @@ int gk20a_init_ce_support(struct gk20a *g)
 	ce_app->initialised = true;
 	ce_app->app_state = NVGPU_CE_ACTIVE;
 
-	mutex_unlock(&ce_app->app_mutex);
+	nvgpu_mutex_release(&ce_app->app_mutex);
 	gk20a_dbg(gpu_dbg_cde_ctx, "ce: init finished");
 
 	return 0;
@@ -379,7 +379,7 @@ void gk20a_ce_destroy(struct gk20a *g)
 	ce_app->app_state = NVGPU_CE_SUSPEND;
 	ce_app->initialised = false;
 
-	mutex_lock(&ce_app->app_mutex);
+	nvgpu_mutex_acquire(&ce_app->app_mutex);
 
 	list_for_each_entry_safe(ce_ctx, ce_ctx_save,
 			&ce_app->allocated_contexts, list) {
@@ -390,8 +390,8 @@ void gk20a_ce_destroy(struct gk20a *g)
 	ce_app->ctx_count = 0;
 	ce_app->next_ctx_id = 0;
 
-	mutex_unlock(&ce_app->app_mutex);
-	mutex_destroy(&ce_app->app_mutex);
+	nvgpu_mutex_release(&ce_app->app_mutex);
+	nvgpu_mutex_destroy(&ce_app->app_mutex);
 }
 
 void gk20a_ce_suspend(struct gk20a *g)
@@ -428,7 +428,7 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev,
 	if (!ce_ctx)
 		return ctx_id;
 
-	mutex_init(&ce_ctx->gpu_ctx_mutex);
+	nvgpu_mutex_init(&ce_ctx->gpu_ctx_mutex);
 
 	ce_ctx->g = g;
 	ce_ctx->dev = g->dev;
@@ -508,20 +508,20 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev,
 		}
 	}
 
-	mutex_lock(&ce_app->app_mutex);
+	nvgpu_mutex_acquire(&ce_app->app_mutex);
 	ctx_id = ce_ctx->ctx_id = ce_app->next_ctx_id;
 	list_add(&ce_ctx->list, &ce_app->allocated_contexts);
 	++ce_app->next_ctx_id;
 	++ce_app->ctx_count;
-	mutex_unlock(&ce_app->app_mutex);
+	nvgpu_mutex_release(&ce_app->app_mutex);
 
 	ce_ctx->gpu_ctx_state = NVGPU_CE_GPU_CTX_ALLOCATED;
 
 end:
 	if (ctx_id == (u32)~0) {
-		mutex_lock(&ce_app->app_mutex);
+		nvgpu_mutex_acquire(&ce_app->app_mutex);
 		gk20a_ce_delete_gpu_context(ce_ctx);
-		mutex_unlock(&ce_app->app_mutex);
+		nvgpu_mutex_release(&ce_app->app_mutex);
 	}
 	return ctx_id;
 
@@ -558,7 +558,7 @@ int gk20a_ce_execute_ops(struct device *dev,
 	if (!ce_app->initialised ||ce_app->app_state != NVGPU_CE_ACTIVE)
 		goto end;
 
-	mutex_lock(&ce_app->app_mutex);
+	nvgpu_mutex_acquire(&ce_app->app_mutex);
 
 	list_for_each_entry_safe(ce_ctx, ce_ctx_save,
 			&ce_app->allocated_contexts, list) {
@@ -568,7 +568,7 @@ int gk20a_ce_execute_ops(struct device *dev,
 		}
 	}
 
-	mutex_unlock(&ce_app->app_mutex);
+	nvgpu_mutex_release(&ce_app->app_mutex);
 
 	if (!found) {
 		ret = -EINVAL;
@@ -580,7 +580,7 @@ int gk20a_ce_execute_ops(struct device *dev,
 		goto end;
 	}
 
-	mutex_lock(&ce_ctx->gpu_ctx_mutex);
+	nvgpu_mutex_acquire(&ce_ctx->gpu_ctx_mutex);
 
 	ce_ctx->cmd_buf_read_queue_offset %= ce_ctx->cmd_buf_end_queue_offset;
 
@@ -672,7 +672,7 @@ int gk20a_ce_execute_ops(struct device *dev,
 	} else
 		ret = -ENOMEM;
 noop:
-	mutex_unlock(&ce_ctx->gpu_ctx_mutex);
+	nvgpu_mutex_release(&ce_ctx->gpu_ctx_mutex);
 end:
 	return ret;
 }
@@ -688,7 +688,7 @@ void gk20a_ce_delete_context(struct device *dev,
 	if (!ce_app->initialised ||ce_app->app_state != NVGPU_CE_ACTIVE)
 		return;
 
-	mutex_lock(&ce_app->app_mutex);
+	nvgpu_mutex_acquire(&ce_app->app_mutex);
 
 	list_for_each_entry_safe(ce_ctx, ce_ctx_save,
 			&ce_app->allocated_contexts, list) {
@@ -699,7 +699,7 @@ void gk20a_ce_delete_context(struct device *dev,
 		}
 	}
 
-	mutex_unlock(&ce_app->app_mutex);
+	nvgpu_mutex_release(&ce_app->app_mutex);
 	return;
 }
 EXPORT_SYMBOL(gk20a_ce_delete_context);
diff --git a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.h b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.h
index 3b53834d..1bb25dd1 100644
--- a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.h
@@ -3,7 +3,7 @@
  *
  * GK20A graphics copy engine (gr host)
  *
- * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -88,7 +88,7 @@ enum {
 /* global ce app db */
 struct gk20a_ce_app {
 	bool initialised;
-	struct mutex app_mutex;
+	struct nvgpu_mutex app_mutex;
 	int app_state;
 
 	struct list_head allocated_contexts;
@@ -101,7 +101,7 @@ struct gk20a_gpu_ctx {
 	struct gk20a *g;
 	struct device *dev;
 	u32 ctx_id;
-	struct mutex gpu_ctx_mutex;
+	struct nvgpu_mutex gpu_ctx_mutex;
 	int gpu_ctx_state;
 	ce_event_callback user_event_callback;
 
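The channel code in the next file also converts plain and raw spinlocks (nvgpu_spinlock_acquire/release, nvgpu_raw_spinlock_acquire/release). Under the same assumption as the mutex sketch above (a Linux-only build, illustrative field layout), the corresponding wrappers could look like:

/*
 * Sketch of the nvgpu spinlock wrappers used in channel_gk20a.c below;
 * layout and signatures are assumptions for illustration.
 */
#include <linux/spinlock.h>

struct nvgpu_spinlock {
	spinlock_t spinlock;	/* wrapped Linux spinlock */
};

static inline void nvgpu_spinlock_init(struct nvgpu_spinlock *spinlock)
{
	spin_lock_init(&spinlock->spinlock);
}

static inline void nvgpu_spinlock_acquire(struct nvgpu_spinlock *spinlock)
{
	spin_lock(&spinlock->spinlock);
}

static inline void nvgpu_spinlock_release(struct nvgpu_spinlock *spinlock)
{
	spin_unlock(&spinlock->spinlock);
}

struct nvgpu_raw_spinlock {
	raw_spinlock_t spinlock;	/* wrapped raw spinlock */
};

static inline void nvgpu_raw_spinlock_init(struct nvgpu_raw_spinlock *spinlock)
{
	raw_spin_lock_init(&spinlock->spinlock);
}

static inline void nvgpu_raw_spinlock_acquire(struct nvgpu_raw_spinlock *spinlock)
{
	raw_spin_lock(&spinlock->spinlock);
}

static inline void nvgpu_raw_spinlock_release(struct nvgpu_raw_spinlock *spinlock)
{
	raw_spin_unlock(&spinlock->spinlock);
}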
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 376a64b0..83a3a523 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -102,7 +102,7 @@ static struct channel_gk20a *allocate_channel(struct fifo_gk20a *f)
 
 	platform = gk20a_get_platform(f->g->dev);
 
-	mutex_lock(&f->free_chs_mutex);
+	nvgpu_mutex_acquire(&f->free_chs_mutex);
 	if (!list_empty(&f->free_chs)) {
 		ch = list_first_entry(&f->free_chs, struct channel_gk20a,
 				free_chs);
@@ -111,7 +111,7 @@ static struct channel_gk20a *allocate_channel(struct fifo_gk20a *f)
 		WARN_ON(ch->referenceable);
 		f->used_channels++;
 	}
-	mutex_unlock(&f->free_chs_mutex);
+	nvgpu_mutex_release(&f->free_chs_mutex);
 
 	if (platform->aggressive_sync_destroy_thresh &&
 			(f->used_channels >
@@ -128,11 +128,11 @@ static void free_channel(struct fifo_gk20a *f,
 
 	trace_gk20a_release_used_channel(ch->hw_chid);
 	/* refcount is zero here and channel is in a freed/dead state */
-	mutex_lock(&f->free_chs_mutex);
+	nvgpu_mutex_acquire(&f->free_chs_mutex);
 	/* add to head to increase visibility of timing-related bugs */
 	list_add(&ch->free_chs, &f->free_chs);
 	f->used_channels--;
-	mutex_unlock(&f->free_chs_mutex);
+	nvgpu_mutex_release(&f->free_chs_mutex);
 
 	if (platform->aggressive_sync_destroy_thresh &&
 			(f->used_channels <
@@ -494,10 +494,10 @@ void gk20a_channel_abort_clean_up(struct channel_gk20a *ch)
 	gk20a_channel_cancel_job_clean_up(ch, true);
 
 	/* ensure no fences are pending */
-	mutex_lock(&ch->sync_lock);
+	nvgpu_mutex_acquire(&ch->sync_lock);
 	if (ch->sync)
 		ch->sync->set_min_eq_max(ch->sync);
-	mutex_unlock(&ch->sync_lock);
+	nvgpu_mutex_release(&ch->sync_lock);
 
 	/* release all job semaphores (applies only to jobs that use
 	   semaphore synchronization) */
@@ -595,7 +595,7 @@ void gk20a_disable_channel(struct channel_gk20a *ch)
 static void gk20a_free_cycle_stats_buffer(struct channel_gk20a *ch)
 {
 	/* disable existing cyclestats buffer */
-	mutex_lock(&ch->cyclestate.cyclestate_buffer_mutex);
+	nvgpu_mutex_acquire(&ch->cyclestate.cyclestate_buffer_mutex);
 	if (ch->cyclestate.cyclestate_buffer_handler) {
 		dma_buf_vunmap(ch->cyclestate.cyclestate_buffer_handler,
 				ch->cyclestate.cyclestate_buffer);
@@ -604,7 +604,7 @@ static void gk20a_free_cycle_stats_buffer(struct channel_gk20a *ch)
 		ch->cyclestate.cyclestate_buffer = NULL;
 		ch->cyclestate.cyclestate_buffer_size = 0;
 	}
-	mutex_unlock(&ch->cyclestate.cyclestate_buffer_mutex);
+	nvgpu_mutex_release(&ch->cyclestate.cyclestate_buffer_mutex);
 }
 
 static int gk20a_channel_cycle_stats(struct channel_gk20a *ch,
@@ -654,12 +654,12 @@ static int gk20a_flush_cycle_stats_snapshot(struct channel_gk20a *ch)
 {
 	int ret;
 
-	mutex_lock(&ch->cs_client_mutex);
+	nvgpu_mutex_acquire(&ch->cs_client_mutex);
 	if (ch->cs_client)
 		ret = gr_gk20a_css_flush(ch, ch->cs_client);
 	else
 		ret = -EBADF;
-	mutex_unlock(&ch->cs_client_mutex);
+	nvgpu_mutex_release(&ch->cs_client_mutex);
 
 	return ret;
 }
@@ -671,7 +671,7 @@ static int gk20a_attach_cycle_stats_snapshot(struct channel_gk20a *ch,
 {
 	int ret;
 
-	mutex_lock(&ch->cs_client_mutex);
+	nvgpu_mutex_acquire(&ch->cs_client_mutex);
 	if (ch->cs_client) {
 		ret = -EEXIST;
 	} else {
@@ -681,7 +681,7 @@ static int gk20a_attach_cycle_stats_snapshot(struct channel_gk20a *ch,
 				perfmon_id_start,
 				&ch->cs_client);
 	}
-	mutex_unlock(&ch->cs_client_mutex);
+	nvgpu_mutex_release(&ch->cs_client_mutex);
 
 	return ret;
 }
@@ -690,14 +690,14 @@ static int gk20a_free_cycle_stats_snapshot(struct channel_gk20a *ch)
 {
 	int ret;
 
-	mutex_lock(&ch->cs_client_mutex);
+	nvgpu_mutex_acquire(&ch->cs_client_mutex);
 	if (ch->cs_client) {
 		ret = gr_gk20a_css_detach(ch, ch->cs_client);
 		ch->cs_client = NULL;
 	} else {
 		ret = 0;
 	}
-	mutex_unlock(&ch->cs_client_mutex);
+	nvgpu_mutex_release(&ch->cs_client_mutex);
 
 	return ret;
 }
@@ -824,9 +824,9 @@ static int gk20a_init_error_notifier(struct channel_gk20a *ch,
 	memset(ch->error_notifier, 0, sizeof(struct nvgpu_notification));
 
 	/* set channel notifiers pointer */
-	mutex_lock(&ch->error_notifier_mutex);
+	nvgpu_mutex_acquire(&ch->error_notifier_mutex);
 	ch->error_notifier_ref = dmabuf;
-	mutex_unlock(&ch->error_notifier_mutex);
+	nvgpu_mutex_release(&ch->error_notifier_mutex);
 
 	return 0;
 }
@@ -857,14 +857,14 @@ void gk20a_set_error_notifier_locked(struct channel_gk20a *ch, __u32 error)
 
 void gk20a_set_error_notifier(struct channel_gk20a *ch, __u32 error)
 {
-	mutex_lock(&ch->error_notifier_mutex);
+	nvgpu_mutex_acquire(&ch->error_notifier_mutex);
 	gk20a_set_error_notifier_locked(ch, error);
-	mutex_unlock(&ch->error_notifier_mutex);
+	nvgpu_mutex_release(&ch->error_notifier_mutex);
 }
 
 static void gk20a_free_error_notifiers(struct channel_gk20a *ch)
 {
-	mutex_lock(&ch->error_notifier_mutex);
+	nvgpu_mutex_acquire(&ch->error_notifier_mutex);
 	if (ch->error_notifier_ref) {
 		dma_buf_vunmap(ch->error_notifier_ref, ch->error_notifier_va);
 		dma_buf_put(ch->error_notifier_ref);
@@ -872,7 +872,7 @@ static void gk20a_free_error_notifiers(struct channel_gk20a *ch)
 		ch->error_notifier = NULL;
 		ch->error_notifier_va = NULL;
 	}
-	mutex_unlock(&ch->error_notifier_mutex);
+	nvgpu_mutex_release(&ch->error_notifier_mutex);
 }
 
 static void gk20a_wait_until_counter_is_N(
@@ -927,16 +927,16 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 	nvgpu_wait_for_deferred_interrupts(g);
 
 	/* prevent new refs */
-	spin_lock(&ch->ref_obtain_lock);
+	nvgpu_spinlock_acquire(&ch->ref_obtain_lock);
 	if (!ch->referenceable) {
-		spin_unlock(&ch->ref_obtain_lock);
+		nvgpu_spinlock_release(&ch->ref_obtain_lock);
 		gk20a_err(dev_from_gk20a(ch->g),
 			  "Extra %s() called to channel %u",
 			  __func__, ch->hw_chid);
 		return;
 	}
 	ch->referenceable = false;
-	spin_unlock(&ch->ref_obtain_lock);
+	nvgpu_spinlock_release(&ch->ref_obtain_lock);
 
 	/* matches with the initial reference in gk20a_open_new_channel() */
 	atomic_dec(&ch->ref_count);
@@ -948,18 +948,18 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 			__func__, "references");
 
 	/* if engine reset was deferred, perform it now */
-	mutex_lock(&f->deferred_reset_mutex);
+	nvgpu_mutex_acquire(&f->deferred_reset_mutex);
 	if (g->fifo.deferred_reset_pending) {
 		gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "engine reset was"
 			  " deferred, running now");
 		/* if lock is already taken, a reset is taking place
 		   so no need to repeat */
-		if (mutex_trylock(&g->fifo.gr_reset_mutex)) {
+		if (nvgpu_mutex_tryacquire(&g->fifo.gr_reset_mutex)) {
 			gk20a_fifo_deferred_reset(g, ch);
-			mutex_unlock(&g->fifo.gr_reset_mutex);
+			nvgpu_mutex_release(&g->fifo.gr_reset_mutex);
 		}
 	}
-	mutex_unlock(&f->deferred_reset_mutex);
+	nvgpu_mutex_release(&f->deferred_reset_mutex);
 
 	if (!gk20a_channel_as_bound(ch))
 		goto unbind;
@@ -991,12 +991,12 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 	channel_gk20a_free_priv_cmdbuf(ch);
 
 	/* sync must be destroyed before releasing channel vm */
-	mutex_lock(&ch->sync_lock);
+	nvgpu_mutex_acquire(&ch->sync_lock);
 	if (ch->sync) {
 		gk20a_channel_sync_destroy(ch->sync);
 		ch->sync = NULL;
 	}
-	mutex_unlock(&ch->sync_lock);
+	nvgpu_mutex_release(&ch->sync_lock);
 
 	/*
 	 * free the channel used semaphore index.
@@ -1011,10 +1011,10 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 	 */
 	gk20a_vm_put(ch_vm);
 
-	spin_lock(&ch->update_fn_lock);
+	nvgpu_spinlock_acquire(&ch->update_fn_lock);
 	ch->update_fn = NULL;
 	ch->update_fn_data = NULL;
-	spin_unlock(&ch->update_fn_lock);
+	nvgpu_spinlock_release(&ch->update_fn_lock);
 	cancel_work_sync(&ch->update_fn_work);
 	cancel_delayed_work_sync(&ch->clean_up.wq);
 	cancel_delayed_work_sync(&ch->timeout.wq);
@@ -1037,21 +1037,21 @@ unbind:
 	WARN_ON(ch->sync);
 
 	/* unlink all debug sessions */
-	mutex_lock(&g->dbg_sessions_lock);
+	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 
 	list_for_each_entry_safe(session_data, tmp_s,
 				&ch->dbg_s_list, dbg_s_entry) {
 		dbg_s = session_data->dbg_s;
-		mutex_lock(&dbg_s->ch_list_lock);
+		nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
 		list_for_each_entry_safe(ch_data, tmp,
 					&dbg_s->ch_list, ch_entry) {
 			if (ch_data->chid == ch->hw_chid)
 				dbg_unbind_single_channel_gk20a(dbg_s, ch_data);
 		}
-		mutex_unlock(&dbg_s->ch_list_lock);
+		nvgpu_mutex_release(&dbg_s->ch_list_lock);
 	}
 
-	mutex_unlock(&g->dbg_sessions_lock);
+	nvgpu_mutex_release(&g->dbg_sessions_lock);
 
 	/* free pre-allocated resources, if applicable */
 	if (channel_gk20a_is_prealloc_enabled(ch))
@@ -1079,7 +1079,7 @@ static void gk20a_channel_dump_ref_actions(struct channel_gk20a *ch)
 	unsigned long prev_jiffies = 0;
 	struct device *dev = dev_from_gk20a(ch->g);
 
-	spin_lock(&ch->ref_actions_lock);
+	nvgpu_spinlock_acquire(&ch->ref_actions_lock);
 
 	dev_info(dev, "ch %d: refs %d. Actions, most recent last:\n",
 			ch->hw_chid, atomic_read(&ch->ref_count));
@@ -1109,7 +1109,7 @@ static void gk20a_channel_dump_ref_actions(struct channel_gk20a *ch)
 		get = (get + 1) % GK20A_CHANNEL_REFCOUNT_TRACKING;
 	}
 
-	spin_unlock(&ch->ref_actions_lock);
+	nvgpu_spinlock_release(&ch->ref_actions_lock);
 #endif
 }
 
@@ -1119,7 +1119,7 @@ static void gk20a_channel_save_ref_source(struct channel_gk20a *ch,
 #if GK20A_CHANNEL_REFCOUNT_TRACKING
 	struct channel_gk20a_ref_action *act;
 
-	spin_lock(&ch->ref_actions_lock);
+	nvgpu_spinlock_acquire(&ch->ref_actions_lock);
 
 	act = &ch->ref_actions[ch->ref_actions_put];
 	act->type = type;
@@ -1132,7 +1132,7 @@ static void gk20a_channel_save_ref_source(struct channel_gk20a *ch,
 	ch->ref_actions_put = (ch->ref_actions_put + 1) %
 		GK20A_CHANNEL_REFCOUNT_TRACKING;
 
-	spin_unlock(&ch->ref_actions_lock);
+	nvgpu_spinlock_release(&ch->ref_actions_lock);
 #endif
 }
 
@@ -1152,7 +1152,7 @@ struct channel_gk20a *_gk20a_channel_get(struct channel_gk20a *ch,
 					 const char *caller) {
 	struct channel_gk20a *ret;
 
-	spin_lock(&ch->ref_obtain_lock);
+	nvgpu_spinlock_acquire(&ch->ref_obtain_lock);
 
 	if (likely(ch->referenceable)) {
 		gk20a_channel_save_ref_source(ch, channel_gk20a_ref_action_get);
@@ -1161,7 +1161,7 @@ struct channel_gk20a *_gk20a_channel_get(struct channel_gk20a *ch,
 	} else
 		ret = NULL;
 
-	spin_unlock(&ch->ref_obtain_lock);
+	nvgpu_spinlock_release(&ch->ref_obtain_lock);
 
 	if (ret)
 		trace_gk20a_channel_get(ch->hw_chid, caller);
@@ -1250,10 +1250,10 @@ static void gk20a_channel_update_runcb_fn(struct work_struct *work)
 	void (*update_fn)(struct channel_gk20a *, void *);
 	void *update_fn_data;
 
-	spin_lock(&ch->update_fn_lock);
+	nvgpu_spinlock_acquire(&ch->update_fn_lock);
 	update_fn = ch->update_fn;
 	update_fn_data = ch->update_fn_data;
-	spin_unlock(&ch->update_fn_lock);
+	nvgpu_spinlock_release(&ch->update_fn_lock);
 
 	if (update_fn)
 		update_fn(ch, update_fn_data);
@@ -1268,10 +1268,10 @@ struct channel_gk20a *gk20a_open_new_channel_with_cb(struct gk20a *g,
 	struct channel_gk20a *ch = gk20a_open_new_channel(g, runlist_id, is_privileged_channel);
 
 	if (ch) {
-		spin_lock(&ch->update_fn_lock);
+		nvgpu_spinlock_acquire(&ch->update_fn_lock);
 		ch->update_fn = update_fn;
 		ch->update_fn_data = update_fn_data;
-		spin_unlock(&ch->update_fn_lock);
+		nvgpu_spinlock_release(&ch->update_fn_lock);
 	}
 
 	return ch;
@@ -1325,13 +1325,13 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
 	ch->tgid = current->tgid;  /* process granularity for FECS traces */
 
 	/* unhook all events created on this channel */
-	mutex_lock(&ch->event_id_list_lock);
+	nvgpu_mutex_acquire(&ch->event_id_list_lock);
 	list_for_each_entry_safe(event_id_data, event_id_data_temp,
 				&ch->event_id_list,
 				event_id_node) {
 		list_del_init(&event_id_data->event_id_node);
 	}
-	mutex_unlock(&ch->event_id_list_lock);
+	nvgpu_mutex_release(&ch->event_id_list_lock);
 
 	/* By default, channel is regular (non-TSG) channel */
 	ch->tsgid = NVGPU_INVALID_TSG_ID;
@@ -1357,7 +1357,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
 
 	ch->update_fn = NULL;
 	ch->update_fn_data = NULL;
-	spin_lock_init(&ch->update_fn_lock);
+	nvgpu_spinlock_init(&ch->update_fn_lock);
 	INIT_WORK(&ch->update_fn_work, gk20a_channel_update_runcb_fn);
 
 	/* Mark the channel alive, get-able, with 1 initial use
@@ -1652,17 +1652,17 @@ static void channel_gk20a_free_job(struct channel_gk20a *c,
 void channel_gk20a_joblist_lock(struct channel_gk20a *c)
 {
 	if (channel_gk20a_is_prealloc_enabled(c))
-		mutex_lock(&c->joblist.pre_alloc.read_lock);
+		nvgpu_mutex_acquire(&c->joblist.pre_alloc.read_lock);
 	else
-		spin_lock(&c->joblist.dynamic.lock);
+		nvgpu_spinlock_acquire(&c->joblist.dynamic.lock);
 }
 
 void channel_gk20a_joblist_unlock(struct channel_gk20a *c)
 {
 	if (channel_gk20a_is_prealloc_enabled(c))
-		mutex_unlock(&c->joblist.pre_alloc.read_lock);
+		nvgpu_mutex_release(&c->joblist.pre_alloc.read_lock);
 	else
-		spin_unlock(&c->joblist.dynamic.lock);
+		nvgpu_spinlock_release(&c->joblist.dynamic.lock);
 }
 
 static struct channel_gk20a_job *channel_gk20a_joblist_peek(
@@ -1871,14 +1871,14 @@ int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
 	channel_gk20a_setup_userd(c);
 
 	if (!platform->aggressive_sync_destroy_thresh) {
-		mutex_lock(&c->sync_lock);
+		nvgpu_mutex_acquire(&c->sync_lock);
 		c->sync = gk20a_channel_sync_create(c);
 		if (!c->sync) {
 			err = -ENOMEM;
-			mutex_unlock(&c->sync_lock);
+			nvgpu_mutex_release(&c->sync_lock);
 			goto clean_up_unmap;
 		}
-		mutex_unlock(&c->sync_lock);
+		nvgpu_mutex_release(&c->sync_lock);
 
 		if (g->ops.fifo.resetup_ramfc) {
 			err = g->ops.fifo.resetup_ramfc(c);
@@ -2085,16 +2085,16 @@ static void gk20a_channel_timeout_start(struct channel_gk20a *ch)
 	if (!ch->wdt_enabled)
 		return;
 
-	raw_spin_lock(&ch->timeout.lock);
+	nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
 
 	if (ch->timeout.initialized) {
-		raw_spin_unlock(&ch->timeout.lock);
+		nvgpu_raw_spinlock_release(&ch->timeout.lock);
 		return;
 	}
 
 	ch->timeout.gp_get = gk20a_userd_gp_get(ch->g, ch);
 	ch->timeout.initialized = true;
-	raw_spin_unlock(&ch->timeout.lock);
+	nvgpu_raw_spinlock_release(&ch->timeout.lock);
 
 	schedule_delayed_work(&ch->timeout.wq,
 		msecs_to_jiffies(gk20a_get_channel_watchdog_timeout(ch)));
@@ -2102,18 +2102,18 @@ static void gk20a_channel_timeout_start(struct channel_gk20a *ch)
 
 static void gk20a_channel_timeout_stop(struct channel_gk20a *ch)
 {
-	raw_spin_lock(&ch->timeout.lock);
+	nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
 	if (!ch->timeout.initialized) {
-		raw_spin_unlock(&ch->timeout.lock);
+		nvgpu_raw_spinlock_release(&ch->timeout.lock);
 		return;
 	}
-	raw_spin_unlock(&ch->timeout.lock);
+	nvgpu_raw_spinlock_release(&ch->timeout.lock);
 
 	cancel_delayed_work_sync(&ch->timeout.wq);
 
-	raw_spin_lock(&ch->timeout.lock);
+	nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
 	ch->timeout.initialized = false;
-	raw_spin_unlock(&ch->timeout.lock);
+	nvgpu_raw_spinlock_release(&ch->timeout.lock);
 }
 
 void gk20a_channel_timeout_restart_all_channels(struct gk20a *g)
@@ -2125,13 +2125,13 @@ void gk20a_channel_timeout_restart_all_channels(struct gk20a *g)
 		struct channel_gk20a *ch = &f->channel[chid];
 
 		if (gk20a_channel_get(ch)) {
-			raw_spin_lock(&ch->timeout.lock);
+			nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
 			if (!ch->timeout.initialized) {
-				raw_spin_unlock(&ch->timeout.lock);
+				nvgpu_raw_spinlock_release(&ch->timeout.lock);
 				gk20a_channel_put(ch);
 				continue;
 			}
-			raw_spin_unlock(&ch->timeout.lock);
+			nvgpu_raw_spinlock_release(&ch->timeout.lock);
 
 			cancel_delayed_work_sync(&ch->timeout.wq);
 			if (!ch->has_timedout)
@@ -2164,13 +2164,13 @@ static void gk20a_channel_timeout_handler(struct work_struct *work)
 	}
 
 	/* Need global lock since multiple channels can timeout at a time */
-	mutex_lock(&g->ch_wdt_lock);
+	nvgpu_mutex_acquire(&g->ch_wdt_lock);
 
 	/* Get timed out job and reset the timer */
-	raw_spin_lock(&ch->timeout.lock);
+	nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
 	gp_get = ch->timeout.gp_get;
 	ch->timeout.initialized = false;
-	raw_spin_unlock(&ch->timeout.lock);
+	nvgpu_raw_spinlock_release(&ch->timeout.lock);
 
 	if (gk20a_userd_gp_get(ch->g, ch) != gp_get) {
 		gk20a_channel_timeout_start(ch);
@@ -2187,7 +2187,7 @@ static void gk20a_channel_timeout_handler(struct work_struct *work)
 		NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT, true);
 
 fail_unlock:
-	mutex_unlock(&g->ch_wdt_lock);
+	nvgpu_mutex_release(&g->ch_wdt_lock);
 	gk20a_channel_put(ch);
 	gk20a_idle(dev_from_gk20a(g));
 }
@@ -2216,17 +2216,17 @@ int gk20a_free_priv_cmdbuf(struct channel_gk20a *c, struct priv_cmd_entry *e)
 
 static void gk20a_channel_schedule_job_clean_up(struct channel_gk20a *c)
 {
-	mutex_lock(&c->clean_up.lock);
+	nvgpu_mutex_acquire(&c->clean_up.lock);
 
 	if (c->clean_up.scheduled) {
-		mutex_unlock(&c->clean_up.lock);
+		nvgpu_mutex_release(&c->clean_up.lock);
 		return;
 	}
 
 	c->clean_up.scheduled = true;
 	schedule_delayed_work(&c->clean_up.wq, 1);
 
-	mutex_unlock(&c->clean_up.lock);
+	nvgpu_mutex_release(&c->clean_up.lock);
 }
 
 static void gk20a_channel_cancel_job_clean_up(struct channel_gk20a *c,
@@ -2235,9 +2235,9 @@ static void gk20a_channel_cancel_job_clean_up(struct channel_gk20a *c,
 	if (wait_for_completion)
 		cancel_delayed_work_sync(&c->clean_up.wq);
 
-	mutex_lock(&c->clean_up.lock);
+	nvgpu_mutex_acquire(&c->clean_up.lock);
 	c->clean_up.scheduled = false;
-	mutex_unlock(&c->clean_up.lock);
+	nvgpu_mutex_release(&c->clean_up.lock);
 }
 
 static int gk20a_channel_add_job(struct channel_gk20a *c,
@@ -2353,13 +2353,13 @@ static void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 		c->sync->signal_timeline(c->sync);
 
 		if (platform->aggressive_sync_destroy_thresh) {
-			mutex_lock(&c->sync_lock);
+			nvgpu_mutex_acquire(&c->sync_lock);
 			if (atomic_dec_and_test(&c->sync->refcount) &&
 					platform->aggressive_sync_destroy) {
 				gk20a_channel_sync_destroy(c->sync);
 				c->sync = NULL;
 			}
-			mutex_unlock(&c->sync_lock);
+			nvgpu_mutex_release(&c->sync_lock);
 		}
 	}
 
@@ -2563,18 +2563,18 @@ static int gk20a_submit_prepare_syncs(struct channel_gk20a *c,
 		need_sync_fence = true;
 
 	if (platform->aggressive_sync_destroy_thresh) {
-		mutex_lock(&c->sync_lock);
+		nvgpu_mutex_acquire(&c->sync_lock);
 		if (!c->sync) {
 			c->sync = gk20a_channel_sync_create(c);
 			if (!c->sync) {
 				err = -ENOMEM;
-				mutex_unlock(&c->sync_lock);
+				nvgpu_mutex_release(&c->sync_lock);
 				goto fail;
 			}
 			new_sync_created = true;
 		}
 		atomic_inc(&c->sync->refcount);
-		mutex_unlock(&c->sync_lock);
+		nvgpu_mutex_release(&c->sync_lock);
 	}
 
 	if (g->ops.fifo.resetup_ramfc && new_sync_created) {
@@ -2920,31 +2920,31 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
 	c->g = NULL;
 	c->hw_chid = chid;
 	atomic_set(&c->bound, false);
-	spin_lock_init(&c->ref_obtain_lock);
+	nvgpu_spinlock_init(&c->ref_obtain_lock);
 	atomic_set(&c->ref_count, 0);
 	c->referenceable = false;
 	init_waitqueue_head(&c->ref_count_dec_wq);
 #if GK20A_CHANNEL_REFCOUNT_TRACKING
-	spin_lock_init(&c->ref_actions_lock);
+	nvgpu_spinlock_init(&c->ref_actions_lock);
 #endif
-	mutex_init(&c->ioctl_lock);
-	mutex_init(&c->error_notifier_mutex);
-	spin_lock_init(&c->joblist.dynamic.lock);
-	mutex_init(&c->joblist.pre_alloc.read_lock);
-	raw_spin_lock_init(&c->timeout.lock);
-	mutex_init(&c->sync_lock);
+	nvgpu_mutex_init(&c->ioctl_lock);
+	nvgpu_mutex_init(&c->error_notifier_mutex);
+	nvgpu_spinlock_init(&c->joblist.dynamic.lock);
+	nvgpu_mutex_init(&c->joblist.pre_alloc.read_lock);
+	nvgpu_raw_spinlock_init(&c->timeout.lock);
+	nvgpu_mutex_init(&c->sync_lock);
 	INIT_DELAYED_WORK(&c->timeout.wq, gk20a_channel_timeout_handler);
 	INIT_DELAYED_WORK(&c->clean_up.wq, gk20a_channel_clean_up_runcb_fn);
-	mutex_init(&c->clean_up.lock);
+	nvgpu_mutex_init(&c->clean_up.lock);
 	INIT_LIST_HEAD(&c->joblist.dynamic.jobs);
 #if defined(CONFIG_GK20A_CYCLE_STATS)
-	mutex_init(&c->cyclestate.cyclestate_buffer_mutex);
-	mutex_init(&c->cs_client_mutex);
+	nvgpu_mutex_init(&c->cyclestate.cyclestate_buffer_mutex);
+	nvgpu_mutex_init(&c->cs_client_mutex);
 #endif
 	INIT_LIST_HEAD(&c->dbg_s_list);
 	INIT_LIST_HEAD(&c->event_id_list);
-	mutex_init(&c->event_id_list_lock);
-	mutex_init(&c->dbg_s_lock);
+	nvgpu_mutex_init(&c->event_id_list_lock);
+	nvgpu_mutex_init(&c->dbg_s_lock);
 	list_add(&c->free_chs, &g->fifo.free_chs);
 
 	return 0;
@@ -3102,7 +3102,7 @@ static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait)
 
 	poll_wait(filep, &event_id_data->event_id_wq, wait);
 
-	mutex_lock(&event_id_data->lock);
+	nvgpu_mutex_acquire(&event_id_data->lock);
 
 	if (event_id_data->is_tsg) {
 		struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
@@ -3127,7 +3127,7 @@ static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait)
 		}
 	}
 
-	mutex_unlock(&event_id_data->lock);
+	nvgpu_mutex_release(&event_id_data->lock);
 
 	return mask;
 }
@@ -3140,15 +3140,15 @@ static int gk20a_event_id_release(struct inode *inode, struct file *filp)
 	if (event_id_data->is_tsg) {
 		struct tsg_gk20a *tsg = g->fifo.tsg + event_id_data->id;
 
-		mutex_lock(&tsg->event_id_list_lock);
+		nvgpu_mutex_acquire(&tsg->event_id_list_lock);
 		list_del_init(&event_id_data->event_id_node);
-		mutex_unlock(&tsg->event_id_list_lock);
+		nvgpu_mutex_release(&tsg->event_id_list_lock);
 	} else {
 		struct channel_gk20a *ch = g->fifo.channel + event_id_data->id;
 
-		mutex_lock(&ch->event_id_list_lock);
+		nvgpu_mutex_acquire(&ch->event_id_list_lock);
 		list_del_init(&event_id_data->event_id_node);
-		mutex_unlock(&ch->event_id_list_lock);
+		nvgpu_mutex_release(&ch->event_id_list_lock);
 	}
 
 	kfree(event_id_data);
@@ -3170,7 +3170,7 @@ static int gk20a_channel_get_event_data_from_id(struct channel_gk20a *ch,
 	struct gk20a_event_id_data *local_event_id_data;
 	bool event_found = false;
 
-	mutex_lock(&ch->event_id_list_lock);
+	nvgpu_mutex_acquire(&ch->event_id_list_lock);
 	list_for_each_entry(local_event_id_data, &ch->event_id_list,
3175 | event_id_node) { | 3175 | event_id_node) { |
3176 | if (local_event_id_data->event_id == event_id) { | 3176 | if (local_event_id_data->event_id == event_id) { |
@@ -3178,7 +3178,7 @@ static int gk20a_channel_get_event_data_from_id(struct channel_gk20a *ch, | |||
3178 | break; | 3178 | break; |
3179 | } | 3179 | } |
3180 | } | 3180 | } |
3181 | mutex_unlock(&ch->event_id_list_lock); | 3181 | nvgpu_mutex_release(&ch->event_id_list_lock); |
3182 | 3182 | ||
3183 | if (event_found) { | 3183 | if (event_found) { |
3184 | *event_id_data = local_event_id_data; | 3184 | *event_id_data = local_event_id_data; |
@@ -3199,7 +3199,7 @@ void gk20a_channel_event_id_post_event(struct channel_gk20a *ch, | |||
3199 | if (err) | 3199 | if (err) |
3200 | return; | 3200 | return; |
3201 | 3201 | ||
3202 | mutex_lock(&event_id_data->lock); | 3202 | nvgpu_mutex_acquire(&event_id_data->lock); |
3203 | 3203 | ||
3204 | gk20a_dbg_info( | 3204 | gk20a_dbg_info( |
3205 | "posting event for event_id=%d on ch=%d\n", | 3205 | "posting event for event_id=%d on ch=%d\n", |
@@ -3208,7 +3208,7 @@ void gk20a_channel_event_id_post_event(struct channel_gk20a *ch, | |||
3208 | 3208 | ||
3209 | wake_up_interruptible_all(&event_id_data->event_id_wq); | 3209 | wake_up_interruptible_all(&event_id_data->event_id_wq); |
3210 | 3210 | ||
3211 | mutex_unlock(&event_id_data->lock); | 3211 | nvgpu_mutex_release(&event_id_data->lock); |
3212 | } | 3212 | } |
3213 | 3213 | ||
3214 | static int gk20a_channel_event_id_enable(struct channel_gk20a *ch, | 3214 | static int gk20a_channel_event_id_enable(struct channel_gk20a *ch, |
@@ -3253,12 +3253,12 @@ static int gk20a_channel_event_id_enable(struct channel_gk20a *ch, | |||
3253 | event_id_data->event_id = event_id; | 3253 | event_id_data->event_id = event_id; |
3254 | 3254 | ||
3255 | init_waitqueue_head(&event_id_data->event_id_wq); | 3255 | init_waitqueue_head(&event_id_data->event_id_wq); |
3256 | mutex_init(&event_id_data->lock); | 3256 | nvgpu_mutex_init(&event_id_data->lock); |
3257 | INIT_LIST_HEAD(&event_id_data->event_id_node); | 3257 | INIT_LIST_HEAD(&event_id_data->event_id_node); |
3258 | 3258 | ||
3259 | mutex_lock(&ch->event_id_list_lock); | 3259 | nvgpu_mutex_acquire(&ch->event_id_list_lock); |
3260 | list_add_tail(&event_id_data->event_id_node, &ch->event_id_list); | 3260 | list_add_tail(&event_id_data->event_id_node, &ch->event_id_list); |
3261 | mutex_unlock(&ch->event_id_list_lock); | 3261 | nvgpu_mutex_release(&ch->event_id_list_lock); |
3262 | 3262 | ||
3263 | fd_install(local_fd, file); | 3263 | fd_install(local_fd, file); |
3264 | file->private_data = event_id_data; | 3264 | file->private_data = event_id_data; |
@@ -3569,7 +3569,7 @@ long gk20a_channel_ioctl(struct file *filp, | |||
3569 | 3569 | ||
3570 | /* protect our sanity for threaded userspace - most of the channel is | 3570 | /* protect our sanity for threaded userspace - most of the channel is |
3571 | * not thread safe */ | 3571 | * not thread safe */ |
3572 | mutex_lock(&ch->ioctl_lock); | 3572 | nvgpu_mutex_acquire(&ch->ioctl_lock); |
3573 | 3573 | ||
3574 | /* this ioctl call keeps a ref to the file which keeps a ref to the | 3574 | /* this ioctl call keeps a ref to the file which keeps a ref to the |
3575 | * channel */ | 3575 | * channel */ |
@@ -3660,12 +3660,12 @@ long gk20a_channel_ioctl(struct file *filp, | |||
3660 | 3660 | ||
3661 | /* waiting is thread-safe, not dropping this mutex could | 3661 | /* waiting is thread-safe, not dropping this mutex could |
3662 | * deadlock in certain conditions */ | 3662 | * deadlock in certain conditions */ |
3663 | mutex_unlock(&ch->ioctl_lock); | 3663 | nvgpu_mutex_release(&ch->ioctl_lock); |
3664 | 3664 | ||
3665 | err = gk20a_channel_wait(ch, | 3665 | err = gk20a_channel_wait(ch, |
3666 | (struct nvgpu_wait_args *)buf); | 3666 | (struct nvgpu_wait_args *)buf); |
3667 | 3667 | ||
3668 | mutex_lock(&ch->ioctl_lock); | 3668 | nvgpu_mutex_acquire(&ch->ioctl_lock); |
3669 | 3669 | ||
3670 | gk20a_idle(dev); | 3670 | gk20a_idle(dev); |
3671 | break; | 3671 | break; |
@@ -3899,7 +3899,7 @@ long gk20a_channel_ioctl(struct file *filp, | |||
3899 | if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) | 3899 | if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) |
3900 | err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)); | 3900 | err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)); |
3901 | 3901 | ||
3902 | mutex_unlock(&ch->ioctl_lock); | 3902 | nvgpu_mutex_release(&ch->ioctl_lock); |
3903 | 3903 | ||
3904 | gk20a_channel_put(ch); | 3904 | gk20a_channel_put(ch); |
3905 | 3905 | ||
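The channel changes above are the mechanical core of the patch: every mutex_lock/mutex_unlock pair becomes nvgpu_mutex_acquire/nvgpu_mutex_release on the same lock, with no change to what is held where. The wrapper definitions are not part of this diff; a minimal sketch of what <nvgpu/lock.h> plausibly provides on the Linux build, assuming thin inline shims so the rename is behavior-preserving:

/*
 * Sketch only: the real types and helpers live in <nvgpu/lock.h>,
 * which this diff does not show.
 */
#include <linux/mutex.h>

struct nvgpu_mutex {
        struct mutex mutex;
};

static inline void nvgpu_mutex_init(struct nvgpu_mutex *mutex)
{
        mutex_init(&mutex->mutex);
}

static inline void nvgpu_mutex_acquire(struct nvgpu_mutex *mutex)
{
        mutex_lock(&mutex->mutex);
}

static inline void nvgpu_mutex_release(struct nvgpu_mutex *mutex)
{
        mutex_unlock(&mutex->mutex);
}

/*
 * nvgpu_spinlock and nvgpu_raw_spinlock would wrap spinlock_t and
 * raw_spinlock_t the same way.
 */

The point of the indirection, presumably, is that code above this layer stops depending on Linux locking primitives directly, so other OS back ends can supply their own definitions without another tree-wide rename.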
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h index f940a271..14ee9f69 100644 --- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h | |||
@@ -19,15 +19,15 @@ | |||
19 | #define CHANNEL_GK20A_H | 19 | #define CHANNEL_GK20A_H |
20 | 20 | ||
21 | #include <linux/log2.h> | 21 | #include <linux/log2.h> |
22 | #include <linux/mutex.h> | ||
23 | #include <linux/poll.h> | 22 | #include <linux/poll.h> |
24 | #include <linux/semaphore.h> | 23 | #include <linux/semaphore.h> |
25 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
26 | #include <linux/spinlock.h> | ||
27 | #include <linux/stacktrace.h> | 25 | #include <linux/stacktrace.h> |
28 | #include <linux/wait.h> | 26 | #include <linux/wait.h> |
29 | #include <uapi/linux/nvgpu.h> | 27 | #include <uapi/linux/nvgpu.h> |
30 | 28 | ||
29 | #include <nvgpu/lock.h> | ||
30 | |||
31 | struct gk20a; | 31 | struct gk20a; |
32 | struct gr_gk20a; | 32 | struct gr_gk20a; |
33 | struct dbg_session_gk20a; | 33 | struct dbg_session_gk20a; |
@@ -80,18 +80,18 @@ struct channel_gk20a_joblist { | |||
80 | unsigned int put; | 80 | unsigned int put; |
81 | unsigned int get; | 81 | unsigned int get; |
82 | struct channel_gk20a_job *jobs; | 82 | struct channel_gk20a_job *jobs; |
83 | struct mutex read_lock; | 83 | struct nvgpu_mutex read_lock; |
84 | } pre_alloc; | 84 | } pre_alloc; |
85 | 85 | ||
86 | struct { | 86 | struct { |
87 | struct list_head jobs; | 87 | struct list_head jobs; |
88 | spinlock_t lock; | 88 | struct nvgpu_spinlock lock; |
89 | } dynamic; | 89 | } dynamic; |
90 | }; | 90 | }; |
91 | 91 | ||
92 | struct channel_gk20a_timeout { | 92 | struct channel_gk20a_timeout { |
93 | struct delayed_work wq; | 93 | struct delayed_work wq; |
94 | raw_spinlock_t lock; | 94 | struct nvgpu_raw_spinlock lock; |
95 | bool initialized; | 95 | bool initialized; |
96 | u32 gp_get; | 96 | u32 gp_get; |
97 | }; | 97 | }; |
@@ -106,12 +106,12 @@ struct gk20a_event_id_data { | |||
106 | bool event_posted; | 106 | bool event_posted; |
107 | 107 | ||
108 | wait_queue_head_t event_id_wq; | 108 | wait_queue_head_t event_id_wq; |
109 | struct mutex lock; | 109 | struct nvgpu_mutex lock; |
110 | struct list_head event_id_node; | 110 | struct list_head event_id_node; |
111 | }; | 111 | }; |
112 | 112 | ||
113 | struct channel_gk20a_clean_up { | 113 | struct channel_gk20a_clean_up { |
114 | struct mutex lock; | 114 | struct nvgpu_mutex lock; |
115 | bool scheduled; | 115 | bool scheduled; |
116 | struct delayed_work wq; | 116 | struct delayed_work wq; |
117 | }; | 117 | }; |
@@ -156,7 +156,7 @@ struct channel_gk20a { | |||
156 | 156 | ||
157 | struct list_head free_chs; | 157 | struct list_head free_chs; |
158 | 158 | ||
159 | spinlock_t ref_obtain_lock; | 159 | struct nvgpu_spinlock ref_obtain_lock; |
160 | bool referenceable; | 160 | bool referenceable; |
161 | atomic_t ref_count; | 161 | atomic_t ref_count; |
162 | wait_queue_head_t ref_count_dec_wq; | 162 | wait_queue_head_t ref_count_dec_wq; |
@@ -169,7 +169,7 @@ struct channel_gk20a { | |||
169 | struct channel_gk20a_ref_action ref_actions[ | 169 | struct channel_gk20a_ref_action ref_actions[ |
170 | GK20A_CHANNEL_REFCOUNT_TRACKING]; | 170 | GK20A_CHANNEL_REFCOUNT_TRACKING]; |
171 | size_t ref_actions_put; /* index of next write */ | 171 | size_t ref_actions_put; /* index of next write */ |
172 | spinlock_t ref_actions_lock; | 172 | struct nvgpu_spinlock ref_actions_lock; |
173 | #endif | 173 | #endif |
174 | 174 | ||
175 | struct nvgpu_semaphore_int *hw_sema; | 175 | struct nvgpu_semaphore_int *hw_sema; |
@@ -183,7 +183,7 @@ struct channel_gk20a { | |||
183 | bool cde; | 183 | bool cde; |
184 | pid_t pid; | 184 | pid_t pid; |
185 | pid_t tgid; | 185 | pid_t tgid; |
186 | struct mutex ioctl_lock; | 186 | struct nvgpu_mutex ioctl_lock; |
187 | 187 | ||
188 | int tsgid; | 188 | int tsgid; |
189 | struct list_head ch_entry; /* channel's entry in TSG */ | 189 | struct list_head ch_entry; /* channel's entry in TSG */ |
@@ -221,17 +221,17 @@ struct channel_gk20a { | |||
221 | void *cyclestate_buffer; | 221 | void *cyclestate_buffer; |
222 | u32 cyclestate_buffer_size; | 222 | u32 cyclestate_buffer_size; |
223 | struct dma_buf *cyclestate_buffer_handler; | 223 | struct dma_buf *cyclestate_buffer_handler; |
224 | struct mutex cyclestate_buffer_mutex; | 224 | struct nvgpu_mutex cyclestate_buffer_mutex; |
225 | } cyclestate; | 225 | } cyclestate; |
226 | 226 | ||
227 | struct mutex cs_client_mutex; | 227 | struct nvgpu_mutex cs_client_mutex; |
228 | struct gk20a_cs_snapshot_client *cs_client; | 228 | struct gk20a_cs_snapshot_client *cs_client; |
229 | #endif | 229 | #endif |
230 | struct mutex dbg_s_lock; | 230 | struct nvgpu_mutex dbg_s_lock; |
231 | struct list_head dbg_s_list; | 231 | struct list_head dbg_s_list; |
232 | 232 | ||
233 | struct list_head event_id_list; | 233 | struct list_head event_id_list; |
234 | struct mutex event_id_list_lock; | 234 | struct nvgpu_mutex event_id_list_lock; |
235 | 235 | ||
236 | bool has_timedout; | 236 | bool has_timedout; |
237 | u32 timeout_ms_max; | 237 | u32 timeout_ms_max; |
@@ -241,9 +241,9 @@ struct channel_gk20a { | |||
241 | struct dma_buf *error_notifier_ref; | 241 | struct dma_buf *error_notifier_ref; |
242 | struct nvgpu_notification *error_notifier; | 242 | struct nvgpu_notification *error_notifier; |
243 | void *error_notifier_va; | 243 | void *error_notifier_va; |
244 | struct mutex error_notifier_mutex; | 244 | struct nvgpu_mutex error_notifier_mutex; |
245 | 245 | ||
246 | struct mutex sync_lock; | 246 | struct nvgpu_mutex sync_lock; |
247 | struct gk20a_channel_sync *sync; | 247 | struct gk20a_channel_sync *sync; |
248 | 248 | ||
249 | #ifdef CONFIG_TEGRA_GR_VIRTUALIZATION | 249 | #ifdef CONFIG_TEGRA_GR_VIRTUALIZATION |
@@ -254,7 +254,7 @@ struct channel_gk20a { | |||
254 | * via schedule_work */ | 254 | * via schedule_work */ |
255 | void (*update_fn)(struct channel_gk20a *, void *); | 255 | void (*update_fn)(struct channel_gk20a *, void *); |
256 | void *update_fn_data; | 256 | void *update_fn_data; |
257 | spinlock_t update_fn_lock; /* make access to the two above atomic */ | 257 | struct nvgpu_spinlock update_fn_lock; /* make access to the two above atomic */ |
258 | struct work_struct update_fn_work; | 258 | struct work_struct update_fn_work; |
259 | 259 | ||
260 | u32 interleave_level; | 260 | u32 interleave_level; |
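On the header side the lock fields change type in place: <linux/mutex.h> and <linux/spinlock.h> drop out of the include list, <nvgpu/lock.h> comes in, and each field keeps its name and role. The declaration/initialization pairing, cut down to a sketch (demo_channel is illustrative, not a driver type):

#include <nvgpu/lock.h>

struct demo_channel {
        struct nvgpu_mutex ioctl_lock;          /* was: struct mutex */
        struct nvgpu_spinlock ref_obtain_lock;  /* was: spinlock_t */
        struct nvgpu_raw_spinlock timeout_lock; /* was: raw_spinlock_t */
};

static void demo_channel_init(struct demo_channel *ch)
{
        nvgpu_mutex_init(&ch->ioctl_lock);
        nvgpu_spinlock_init(&ch->ref_obtain_lock);
        nvgpu_raw_spinlock_init(&ch->timeout_lock);
}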
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c index 18971b09..097635a7 100644 --- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c | |||
@@ -414,9 +414,9 @@ struct wait_fence_work { | |||
414 | static void gk20a_add_pending_sema_wait(struct gk20a *g, | 414 | static void gk20a_add_pending_sema_wait(struct gk20a *g, |
415 | struct wait_fence_work *work) | 415 | struct wait_fence_work *work) |
416 | { | 416 | { |
417 | raw_spin_lock(&g->pending_sema_waits_lock); | 417 | nvgpu_raw_spinlock_acquire(&g->pending_sema_waits_lock); |
418 | list_add(&work->entry, &g->pending_sema_waits); | 418 | list_add(&work->entry, &g->pending_sema_waits); |
419 | raw_spin_unlock(&g->pending_sema_waits_lock); | 419 | nvgpu_raw_spinlock_release(&g->pending_sema_waits_lock); |
420 | } | 420 | } |
421 | 421 | ||
422 | /* | 422 | /* |
@@ -426,9 +426,9 @@ static void gk20a_add_pending_sema_wait(struct gk20a *g, | |||
426 | static void gk20a_start_sema_wait_cancel(struct gk20a *g, | 426 | static void gk20a_start_sema_wait_cancel(struct gk20a *g, |
427 | struct list_head *list) | 427 | struct list_head *list) |
428 | { | 428 | { |
429 | raw_spin_lock(&g->pending_sema_waits_lock); | 429 | nvgpu_raw_spinlock_acquire(&g->pending_sema_waits_lock); |
430 | list_replace_init(&g->pending_sema_waits, list); | 430 | list_replace_init(&g->pending_sema_waits, list); |
431 | raw_spin_unlock(&g->pending_sema_waits_lock); | 431 | nvgpu_raw_spinlock_release(&g->pending_sema_waits_lock); |
432 | } | 432 | } |
433 | 433 | ||
434 | /* | 434 | /* |
@@ -486,10 +486,10 @@ static void gk20a_channel_semaphore_launcher( | |||
486 | * This spinlock must protect a _very_ small critical section - | 486 | * This spinlock must protect a _very_ small critical section - |
487 | * otherwise it's possible that the deterministic submit path suffers. | 487 | * otherwise it's possible that the deterministic submit path suffers. |
488 | */ | 488 | */ |
489 | raw_spin_lock(&g->pending_sema_waits_lock); | 489 | nvgpu_raw_spinlock_acquire(&g->pending_sema_waits_lock); |
490 | if (!list_empty(&g->pending_sema_waits)) | 490 | if (!list_empty(&g->pending_sema_waits)) |
491 | list_del_init(&w->entry); | 491 | list_del_init(&w->entry); |
492 | raw_spin_unlock(&g->pending_sema_waits_lock); | 492 | nvgpu_raw_spinlock_release(&g->pending_sema_waits_lock); |
493 | 493 | ||
494 | gk20a_dbg_info("waiting for pre fence %p '%s'", | 494 | gk20a_dbg_info("waiting for pre fence %p '%s'", |
495 | fence, fence->name); | 495 | fence, fence->name); |
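The pending-sema-waits list is the only raw-spinlock user converted here, and the comment in gk20a_channel_semaphore_launcher explains why the type matters: the critical section must stay tiny so the deterministic submit path is never stalled behind it. The pattern reduced to its essentials (demo_ names are placeholders):

#include <linux/list.h>
#include <nvgpu/lock.h>

struct demo_waiter {
        struct list_head entry;
};

/* nvgpu_raw_spinlock_init(&demo_lock) is assumed to run at setup. */
static struct nvgpu_raw_spinlock demo_lock;
static LIST_HEAD(demo_pending);

static void demo_add_pending(struct demo_waiter *w)
{
        /* one list operation, nothing else, under the raw lock */
        nvgpu_raw_spinlock_acquire(&demo_lock);
        list_add(&w->entry, &demo_pending);
        nvgpu_raw_spinlock_release(&demo_lock);
}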
diff --git a/drivers/gpu/nvgpu/gk20a/clk_gk20a.c b/drivers/gpu/nvgpu/gk20a/clk_gk20a.c index 32690c90..38d13b4b 100644 --- a/drivers/gpu/nvgpu/gk20a/clk_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/clk_gk20a.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * GK20A Clocks | 2 | * GK20A Clocks |
3 | * | 3 | * |
4 | * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. | 4 | * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms and conditions of the GNU General Public License, | 7 | * under the terms and conditions of the GNU General Public License, |
@@ -457,7 +457,7 @@ static int gk20a_init_clk_setup_sw(struct gk20a *g) | |||
457 | clk->gpc_pll.freq /= pl_to_div[clk->gpc_pll.PL]; | 457 | clk->gpc_pll.freq /= pl_to_div[clk->gpc_pll.PL]; |
458 | } | 458 | } |
459 | 459 | ||
460 | mutex_init(&clk->clk_mutex); | 460 | nvgpu_mutex_init(&clk->clk_mutex); |
461 | 461 | ||
462 | clk->sw_ready = true; | 462 | clk->sw_ready = true; |
463 | 463 | ||
@@ -538,14 +538,14 @@ static int gk20a_clk_export_set_rate(void *data, unsigned long *rate) | |||
538 | struct clk_gk20a *clk = &g->clk; | 538 | struct clk_gk20a *clk = &g->clk; |
539 | 539 | ||
540 | if (rate) { | 540 | if (rate) { |
541 | mutex_lock(&clk->clk_mutex); | 541 | nvgpu_mutex_acquire(&clk->clk_mutex); |
542 | old_freq = clk->gpc_pll.freq; | 542 | old_freq = clk->gpc_pll.freq; |
543 | ret = set_pll_target(g, rate_gpu_to_gpc2clk(*rate), old_freq); | 543 | ret = set_pll_target(g, rate_gpu_to_gpc2clk(*rate), old_freq); |
544 | if (!ret && clk->gpc_pll.enabled) | 544 | if (!ret && clk->gpc_pll.enabled) |
545 | ret = set_pll_freq(g, clk->gpc_pll.freq, old_freq); | 545 | ret = set_pll_freq(g, clk->gpc_pll.freq, old_freq); |
546 | if (!ret) | 546 | if (!ret) |
547 | *rate = rate_gpc2clk_to_gpu(clk->gpc_pll.freq); | 547 | *rate = rate_gpc2clk_to_gpu(clk->gpc_pll.freq); |
548 | mutex_unlock(&clk->clk_mutex); | 548 | nvgpu_mutex_release(&clk->clk_mutex); |
549 | } | 549 | } |
550 | return ret; | 550 | return ret; |
551 | } | 551 | } |
@@ -556,9 +556,9 @@ static int gk20a_clk_export_enable(void *data) | |||
556 | struct gk20a *g = data; | 556 | struct gk20a *g = data; |
557 | struct clk_gk20a *clk = &g->clk; | 557 | struct clk_gk20a *clk = &g->clk; |
558 | 558 | ||
559 | mutex_lock(&clk->clk_mutex); | 559 | nvgpu_mutex_acquire(&clk->clk_mutex); |
560 | ret = set_pll_freq(g, clk->gpc_pll.freq, clk->gpc_pll.freq); | 560 | ret = set_pll_freq(g, clk->gpc_pll.freq, clk->gpc_pll.freq); |
561 | mutex_unlock(&clk->clk_mutex); | 561 | nvgpu_mutex_release(&clk->clk_mutex); |
562 | return ret; | 562 | return ret; |
563 | } | 563 | } |
564 | 564 | ||
@@ -567,10 +567,10 @@ static void gk20a_clk_export_disable(void *data) | |||
567 | struct gk20a *g = data; | 567 | struct gk20a *g = data; |
568 | struct clk_gk20a *clk = &g->clk; | 568 | struct clk_gk20a *clk = &g->clk; |
569 | 569 | ||
570 | mutex_lock(&clk->clk_mutex); | 570 | nvgpu_mutex_acquire(&clk->clk_mutex); |
571 | if (g->clk.clk_hw_on) | 571 | if (g->clk.clk_hw_on) |
572 | clk_disable_gpcpll(g, 1); | 572 | clk_disable_gpcpll(g, 1); |
573 | mutex_unlock(&clk->clk_mutex); | 573 | nvgpu_mutex_release(&clk->clk_mutex); |
574 | } | 574 | } |
575 | 575 | ||
576 | static void gk20a_clk_export_init(void *data, unsigned long *rate, bool *state) | 576 | static void gk20a_clk_export_init(void *data, unsigned long *rate, bool *state) |
@@ -578,12 +578,12 @@ static void gk20a_clk_export_init(void *data, unsigned long *rate, bool *state) | |||
578 | struct gk20a *g = data; | 578 | struct gk20a *g = data; |
579 | struct clk_gk20a *clk = &g->clk; | 579 | struct clk_gk20a *clk = &g->clk; |
580 | 580 | ||
581 | mutex_lock(&clk->clk_mutex); | 581 | nvgpu_mutex_acquire(&clk->clk_mutex); |
582 | if (state) | 582 | if (state) |
583 | *state = clk->gpc_pll.enabled; | 583 | *state = clk->gpc_pll.enabled; |
584 | if (rate) | 584 | if (rate) |
585 | *rate = rate_gpc2clk_to_gpu(clk->gpc_pll.freq); | 585 | *rate = rate_gpc2clk_to_gpu(clk->gpc_pll.freq); |
586 | mutex_unlock(&clk->clk_mutex); | 586 | nvgpu_mutex_release(&clk->clk_mutex); |
587 | } | 587 | } |
588 | 588 | ||
589 | static struct tegra_clk_export_ops gk20a_clk_export_ops = { | 589 | static struct tegra_clk_export_ops gk20a_clk_export_ops = { |
@@ -640,11 +640,11 @@ static int gk20a_init_clk_support(struct gk20a *g) | |||
640 | if (err) | 640 | if (err) |
641 | return err; | 641 | return err; |
642 | 642 | ||
643 | mutex_lock(&clk->clk_mutex); | 643 | nvgpu_mutex_acquire(&clk->clk_mutex); |
644 | clk->clk_hw_on = true; | 644 | clk->clk_hw_on = true; |
645 | 645 | ||
646 | err = gk20a_init_clk_setup_hw(g); | 646 | err = gk20a_init_clk_setup_hw(g); |
647 | mutex_unlock(&clk->clk_mutex); | 647 | nvgpu_mutex_release(&clk->clk_mutex); |
648 | if (err) | 648 | if (err) |
649 | return err; | 649 | return err; |
650 | 650 | ||
@@ -658,9 +658,9 @@ static int gk20a_init_clk_support(struct gk20a *g) | |||
658 | return err; | 658 | return err; |
659 | 659 | ||
660 | /* The prev call may not enable PLL if gbus is unbalanced - force it */ | 660 | /* The prev call may not enable PLL if gbus is unbalanced - force it */ |
661 | mutex_lock(&clk->clk_mutex); | 661 | nvgpu_mutex_acquire(&clk->clk_mutex); |
662 | err = set_pll_freq(g, clk->gpc_pll.freq, clk->gpc_pll.freq); | 662 | err = set_pll_freq(g, clk->gpc_pll.freq, clk->gpc_pll.freq); |
663 | mutex_unlock(&clk->clk_mutex); | 663 | nvgpu_mutex_release(&clk->clk_mutex); |
664 | if (err) | 664 | if (err) |
665 | return err; | 665 | return err; |
666 | 666 | ||
@@ -680,10 +680,10 @@ static int gk20a_suspend_clk_support(struct gk20a *g) | |||
680 | clk_disable(g->clk.tegra_clk); | 680 | clk_disable(g->clk.tegra_clk); |
681 | 681 | ||
682 | /* The prev call may not disable PLL if gbus is unbalanced - force it */ | 682 | /* The prev call may not disable PLL if gbus is unbalanced - force it */ |
683 | mutex_lock(&g->clk.clk_mutex); | 683 | nvgpu_mutex_acquire(&g->clk.clk_mutex); |
684 | ret = clk_disable_gpcpll(g, 1); | 684 | ret = clk_disable_gpcpll(g, 1); |
685 | g->clk.clk_hw_on = false; | 685 | g->clk.clk_hw_on = false; |
686 | mutex_unlock(&g->clk.clk_mutex); | 686 | nvgpu_mutex_release(&g->clk.clk_mutex); |
687 | return ret; | 687 | return ret; |
688 | } | 688 | } |
689 | 689 | ||
@@ -714,10 +714,10 @@ static int pll_reg_show(struct seq_file *s, void *data) | |||
714 | struct gk20a *g = s->private; | 714 | struct gk20a *g = s->private; |
715 | u32 reg, m, n, pl, f; | 715 | u32 reg, m, n, pl, f; |
716 | 716 | ||
717 | mutex_lock(&g->clk.clk_mutex); | 717 | nvgpu_mutex_acquire(&g->clk.clk_mutex); |
718 | if (!g->clk.clk_hw_on) { | 718 | if (!g->clk.clk_hw_on) { |
719 | seq_printf(s, "gk20a powered down - no access to registers\n"); | 719 | seq_printf(s, "gk20a powered down - no access to registers\n"); |
720 | mutex_unlock(&g->clk.clk_mutex); | 720 | nvgpu_mutex_release(&g->clk.clk_mutex); |
721 | return 0; | 721 | return 0; |
722 | } | 722 | } |
723 | 723 | ||
@@ -733,7 +733,7 @@ static int pll_reg_show(struct seq_file *s, void *data) | |||
733 | f = g->clk.gpc_pll.clk_in * n / (m * pl_to_div[pl]); | 733 | f = g->clk.gpc_pll.clk_in * n / (m * pl_to_div[pl]); |
734 | seq_printf(s, "coef = 0x%x : m = %u : n = %u : pl = %u", reg, m, n, pl); | 734 | seq_printf(s, "coef = 0x%x : m = %u : n = %u : pl = %u", reg, m, n, pl); |
735 | seq_printf(s, " : pll_f(gpu_f) = %u(%u) kHz\n", f, f/2); | 735 | seq_printf(s, " : pll_f(gpu_f) = %u(%u) kHz\n", f, f/2); |
736 | mutex_unlock(&g->clk.clk_mutex); | 736 | nvgpu_mutex_release(&g->clk.clk_mutex); |
737 | return 0; | 737 | return 0; |
738 | } | 738 | } |
739 | 739 | ||
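clk_gk20a.c is mostly more of the same rename, but it has the most early-return paths under a single lock, and the conversion keeps the invariant that every exit from a clk_mutex section releases it, including the powered-down bail-out in pll_reg_show. In outline (demo_show_pll is a placeholder name):

static int demo_show_pll(struct gk20a *g)
{
        nvgpu_mutex_acquire(&g->clk.clk_mutex);
        if (!g->clk.clk_hw_on) {
                /* the bail-out path must drop the mutex too */
                nvgpu_mutex_release(&g->clk.clk_mutex);
                return 0;
        }
        /* ... safe to read the PLL registers here ... */
        nvgpu_mutex_release(&g->clk.clk_mutex);
        return 0;
}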
diff --git a/drivers/gpu/nvgpu/gk20a/clk_gk20a.h b/drivers/gpu/nvgpu/gk20a/clk_gk20a.h index a45dfcb7..8260fd4a 100644 --- a/drivers/gpu/nvgpu/gk20a/clk_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/clk_gk20a.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2011 - 2016, NVIDIA CORPORATION. All rights reserved. | 2 | * Copyright (c) 2011 - 2017, NVIDIA CORPORATION. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms and conditions of the GNU General Public License, | 5 | * under the terms and conditions of the GNU General Public License, |
@@ -16,7 +16,7 @@ | |||
16 | #ifndef CLK_GK20A_H | 16 | #ifndef CLK_GK20A_H |
17 | #define CLK_GK20A_H | 17 | #define CLK_GK20A_H |
18 | 18 | ||
19 | #include <linux/mutex.h> | 19 | #include <nvgpu/lock.h> |
20 | #include <linux/clkdev.h> | 20 | #include <linux/clkdev.h> |
21 | #include <linux/clk-provider.h> | 21 | #include <linux/clk-provider.h> |
22 | 22 | ||
@@ -86,7 +86,7 @@ struct clk_gk20a { | |||
86 | #endif | 86 | #endif |
87 | struct pll gpc_pll; | 87 | struct pll gpc_pll; |
88 | struct pll gpc_pll_last; | 88 | struct pll gpc_pll_last; |
89 | struct mutex clk_mutex; | 89 | struct nvgpu_mutex clk_mutex; |
90 | struct namemap_cfg *clk_namemap; | 90 | struct namemap_cfg *clk_namemap; |
91 | u32 namemap_num; | 91 | u32 namemap_num; |
92 | u32 *namemap_xlat_table; | 92 | u32 *namemap_xlat_table; |
diff --git a/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c index aa92796c..4bc7ee52 100644 --- a/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * GK20A Cycle stats snapshots support (subsystem for gr_gk20a). | 2 | * GK20A Cycle stats snapshots support (subsystem for gr_gk20a). |
3 | * | 3 | * |
4 | * Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. | 4 | * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms and conditions of the GNU General Public License, | 7 | * under the terms and conditions of the GNU General Public License, |
@@ -19,7 +19,7 @@ | |||
19 | #include <linux/bitops.h> | 19 | #include <linux/bitops.h> |
20 | #include <linux/dma-mapping.h> | 20 | #include <linux/dma-mapping.h> |
21 | #include <linux/dma-buf.h> | 21 | #include <linux/dma-buf.h> |
22 | #include <linux/mutex.h> | 22 | #include <nvgpu/lock.h> |
23 | #include <linux/vmalloc.h> | 23 | #include <linux/vmalloc.h> |
24 | 24 | ||
25 | #include "gk20a.h" | 25 | #include "gk20a.h" |
@@ -557,7 +557,7 @@ int gr_gk20a_css_attach(struct channel_gk20a *ch, | |||
557 | gr = &g->gr; | 557 | gr = &g->gr; |
558 | *cs_client = NULL; | 558 | *cs_client = NULL; |
559 | 559 | ||
560 | mutex_lock(&gr->cs_lock); | 560 | nvgpu_mutex_acquire(&gr->cs_lock); |
561 | 561 | ||
562 | ret = css_gr_create_shared_data(gr); | 562 | ret = css_gr_create_shared_data(gr); |
563 | if (ret) | 563 | if (ret) |
@@ -577,7 +577,7 @@ int gr_gk20a_css_attach(struct channel_gk20a *ch, | |||
577 | if (perfmon_start) | 577 | if (perfmon_start) |
578 | *perfmon_start = (*cs_client)->perfmon_start; | 578 | *perfmon_start = (*cs_client)->perfmon_start; |
579 | 579 | ||
580 | mutex_unlock(&gr->cs_lock); | 580 | nvgpu_mutex_release(&gr->cs_lock); |
581 | 581 | ||
582 | return 0; | 582 | return 0; |
583 | 583 | ||
@@ -591,7 +591,7 @@ failed: | |||
591 | if (list_empty(&gr->cs_data->clients)) | 591 | if (list_empty(&gr->cs_data->clients)) |
592 | css_gr_free_shared_data(gr); | 592 | css_gr_free_shared_data(gr); |
593 | } | 593 | } |
594 | mutex_unlock(&gr->cs_lock); | 594 | nvgpu_mutex_release(&gr->cs_lock); |
595 | 595 | ||
596 | if (perfmon_start) | 596 | if (perfmon_start) |
597 | *perfmon_start = 0; | 597 | *perfmon_start = 0; |
@@ -610,7 +610,7 @@ int gr_gk20a_css_detach(struct channel_gk20a *ch, | |||
610 | return -EINVAL; | 610 | return -EINVAL; |
611 | 611 | ||
612 | gr = &g->gr; | 612 | gr = &g->gr; |
613 | mutex_lock(&gr->cs_lock); | 613 | nvgpu_mutex_acquire(&gr->cs_lock); |
614 | if (gr->cs_data) { | 614 | if (gr->cs_data) { |
615 | struct gk20a_cs_snapshot *data = gr->cs_data; | 615 | struct gk20a_cs_snapshot *data = gr->cs_data; |
616 | 616 | ||
@@ -623,7 +623,7 @@ int gr_gk20a_css_detach(struct channel_gk20a *ch, | |||
623 | } else { | 623 | } else { |
624 | ret = -EBADF; | 624 | ret = -EBADF; |
625 | } | 625 | } |
626 | mutex_unlock(&gr->cs_lock); | 626 | nvgpu_mutex_release(&gr->cs_lock); |
627 | 627 | ||
628 | return ret; | 628 | return ret; |
629 | } | 629 | } |
@@ -639,9 +639,9 @@ int gr_gk20a_css_flush(struct channel_gk20a *ch, | |||
639 | return -EINVAL; | 639 | return -EINVAL; |
640 | 640 | ||
641 | gr = &g->gr; | 641 | gr = &g->gr; |
642 | mutex_lock(&gr->cs_lock); | 642 | nvgpu_mutex_acquire(&gr->cs_lock); |
643 | ret = css_gr_flush_snapshots(ch); | 643 | ret = css_gr_flush_snapshots(ch); |
644 | mutex_unlock(&gr->cs_lock); | 644 | nvgpu_mutex_release(&gr->cs_lock); |
645 | 645 | ||
646 | return ret; | 646 | return ret; |
647 | } | 647 | } |
@@ -651,10 +651,10 @@ void gr_gk20a_free_cyclestats_snapshot_data(struct gk20a *g) | |||
651 | { | 651 | { |
652 | struct gr_gk20a *gr = &g->gr; | 652 | struct gr_gk20a *gr = &g->gr; |
653 | 653 | ||
654 | mutex_lock(&gr->cs_lock); | 654 | nvgpu_mutex_acquire(&gr->cs_lock); |
655 | css_gr_free_shared_data(gr); | 655 | css_gr_free_shared_data(gr); |
656 | mutex_unlock(&gr->cs_lock); | 656 | nvgpu_mutex_release(&gr->cs_lock); |
657 | mutex_destroy(&gr->cs_lock); | 657 | nvgpu_mutex_destroy(&gr->cs_lock); |
658 | } | 658 | } |
659 | 659 | ||
660 | static int css_hw_check_data_available(struct channel_gk20a *ch, u32 *pending, | 660 | static int css_hw_check_data_available(struct channel_gk20a *ch, u32 *pending, |
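gr_gk20a_free_cyclestats_snapshot_data is the one site in this diff that also retires a lock, pairing the earlier nvgpu_mutex_init with an explicit nvgpu_mutex_destroy once the protected data is gone. The lifecycle in miniature (demo_ names are illustrative):

#include <nvgpu/lock.h>

struct demo_cs_state {
        struct nvgpu_mutex cs_lock;
        void *shared;
};

static void demo_cs_setup(struct demo_cs_state *s)
{
        nvgpu_mutex_init(&s->cs_lock);
}

static void demo_cs_teardown(struct demo_cs_state *s)
{
        nvgpu_mutex_acquire(&s->cs_lock);
        s->shared = NULL;                 /* free shared data under the lock */
        nvgpu_mutex_release(&s->cs_lock);
        nvgpu_mutex_destroy(&s->cs_lock); /* only after the final release */
}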
diff --git a/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c b/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c index 5c9baf77..351be55e 100644 --- a/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c | |||
@@ -349,7 +349,7 @@ static int nvgpu_gpu_ioctl_inval_icache( | |||
349 | ops.offset = gr_pri_gpc0_gcc_dbg_r(); | 349 | ops.offset = gr_pri_gpc0_gcc_dbg_r(); |
350 | 350 | ||
351 | /* Take the global lock, since we'll be doing global regops */ | 351 | /* Take the global lock, since we'll be doing global regops */ |
352 | mutex_lock(&g->dbg_sessions_lock); | 352 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
353 | 353 | ||
354 | err = gr_gk20a_exec_ctx_ops(ch, &ops, 1, 0, 1); | 354 | err = gr_gk20a_exec_ctx_ops(ch, &ops, 1, 0, 1); |
355 | 355 | ||
@@ -371,7 +371,7 @@ static int nvgpu_gpu_ioctl_inval_icache( | |||
371 | gk20a_writel(g, gr_pri_gpc0_tpc0_sm_cache_control_r(), cache_ctrl); | 371 | gk20a_writel(g, gr_pri_gpc0_tpc0_sm_cache_control_r(), cache_ctrl); |
372 | 372 | ||
373 | end: | 373 | end: |
374 | mutex_unlock(&g->dbg_sessions_lock); | 374 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
375 | return err; | 375 | return err; |
376 | } | 376 | } |
377 | 377 | ||
@@ -384,9 +384,9 @@ static int nvgpu_gpu_ioctl_set_mmu_debug_mode( | |||
384 | return -EINVAL; | 384 | return -EINVAL; |
385 | } | 385 | } |
386 | 386 | ||
387 | mutex_lock(&g->dbg_sessions_lock); | 387 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
388 | g->ops.mm.set_debug_mode(g, args->state == 1); | 388 | g->ops.mm.set_debug_mode(g, args->state == 1); |
389 | mutex_unlock(&g->dbg_sessions_lock); | 389 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
390 | 390 | ||
391 | gk20a_idle(g->dev); | 391 | gk20a_idle(g->dev); |
392 | return 0; | 392 | return 0; |
@@ -403,13 +403,13 @@ static int nvgpu_gpu_ioctl_set_debug_mode( | |||
403 | if (!ch) | 403 | if (!ch) |
404 | return -EINVAL; | 404 | return -EINVAL; |
405 | 405 | ||
406 | mutex_lock(&g->dbg_sessions_lock); | 406 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
407 | if (g->ops.gr.set_sm_debug_mode) | 407 | if (g->ops.gr.set_sm_debug_mode) |
408 | err = g->ops.gr.set_sm_debug_mode(g, ch, | 408 | err = g->ops.gr.set_sm_debug_mode(g, ch, |
409 | args->sms, !!args->enable); | 409 | args->sms, !!args->enable); |
410 | else | 410 | else |
411 | err = -ENOSYS; | 411 | err = -ENOSYS; |
412 | mutex_unlock(&g->dbg_sessions_lock); | 412 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
413 | 413 | ||
414 | return err; | 414 | return err; |
415 | } | 415 | } |
@@ -419,7 +419,7 @@ static int nvgpu_gpu_ioctl_trigger_suspend(struct gk20a *g) | |||
419 | int err = 0; | 419 | int err = 0; |
420 | u32 dbgr_control0; | 420 | u32 dbgr_control0; |
421 | 421 | ||
422 | mutex_lock(&g->dbg_sessions_lock); | 422 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
423 | /* assert stop trigger. uniformity assumption: all SMs will have | 423 | /* assert stop trigger. uniformity assumption: all SMs will have |
424 | * the same state in dbg_control0. */ | 424 | * the same state in dbg_control0. */ |
425 | dbgr_control0 = | 425 | dbgr_control0 = |
@@ -430,7 +430,7 @@ static int nvgpu_gpu_ioctl_trigger_suspend(struct gk20a *g) | |||
430 | gk20a_writel(g, | 430 | gk20a_writel(g, |
431 | gr_gpcs_tpcs_sm_dbgr_control0_r(), dbgr_control0); | 431 | gr_gpcs_tpcs_sm_dbgr_control0_r(), dbgr_control0); |
432 | 432 | ||
433 | mutex_unlock(&g->dbg_sessions_lock); | 433 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
434 | return err; | 434 | return err; |
435 | } | 435 | } |
436 | 436 | ||
@@ -456,7 +456,7 @@ static int nvgpu_gpu_ioctl_wait_for_pause(struct gk20a *g, | |||
456 | gr_gpc0_tpc0_sm_hww_global_esr_bpt_pause_pending_f() | | 456 | gr_gpc0_tpc0_sm_hww_global_esr_bpt_pause_pending_f() | |
457 | gr_gpc0_tpc0_sm_hww_global_esr_single_step_complete_pending_f(); | 457 | gr_gpc0_tpc0_sm_hww_global_esr_single_step_complete_pending_f(); |
458 | 458 | ||
459 | mutex_lock(&g->dbg_sessions_lock); | 459 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
460 | 460 | ||
461 | /* Lock down all SMs */ | 461 | /* Lock down all SMs */ |
462 | for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) { | 462 | for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) { |
@@ -482,7 +482,7 @@ static int nvgpu_gpu_ioctl_wait_for_pause(struct gk20a *g, | |||
482 | } | 482 | } |
483 | 483 | ||
484 | end: | 484 | end: |
485 | mutex_unlock(&g->dbg_sessions_lock); | 485 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
486 | kfree(w_state); | 486 | kfree(w_state); |
487 | return err; | 487 | return err; |
488 | } | 488 | } |
@@ -491,7 +491,7 @@ static int nvgpu_gpu_ioctl_resume_from_pause(struct gk20a *g) | |||
491 | { | 491 | { |
492 | int err = 0; | 492 | int err = 0; |
493 | 493 | ||
494 | mutex_lock(&g->dbg_sessions_lock); | 494 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
495 | 495 | ||
496 | /* Clear the pause mask to tell the GPU we want to resume everyone */ | 496 | /* Clear the pause mask to tell the GPU we want to resume everyone */ |
497 | gk20a_writel(g, | 497 | gk20a_writel(g, |
@@ -505,7 +505,7 @@ static int nvgpu_gpu_ioctl_resume_from_pause(struct gk20a *g) | |||
505 | * then a 1 to the run trigger */ | 505 | * then a 1 to the run trigger */ |
506 | gk20a_resume_all_sms(g); | 506 | gk20a_resume_all_sms(g); |
507 | 507 | ||
508 | mutex_unlock(&g->dbg_sessions_lock); | 508 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
509 | return err; | 509 | return err; |
510 | } | 510 | } |
511 | 511 | ||
@@ -551,7 +551,7 @@ static int nvgpu_gpu_ioctl_has_any_exception( | |||
551 | u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); | 551 | u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); |
552 | u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE); | 552 | u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE); |
553 | 553 | ||
554 | mutex_lock(&g->dbg_sessions_lock); | 554 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
555 | 555 | ||
556 | for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) { | 556 | for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) { |
557 | 557 | ||
@@ -565,7 +565,7 @@ static int nvgpu_gpu_ioctl_has_any_exception( | |||
565 | tpc_exception_en |= gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_v(regval) << sm_id; | 565 | tpc_exception_en |= gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_v(regval) << sm_id; |
566 | } | 566 | } |
567 | 567 | ||
568 | mutex_unlock(&g->dbg_sessions_lock); | 568 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
569 | args->tpc_exception_en_sm_mask = tpc_exception_en; | 569 | args->tpc_exception_en_sm_mask = tpc_exception_en; |
570 | return err; | 570 | return err; |
571 | } | 571 | } |
diff --git a/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c b/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c index 705eccaa..ffd15a37 100644 --- a/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/ctxsw_trace_gk20a.c | |||
@@ -47,7 +47,7 @@ struct gk20a_ctxsw_dev { | |||
47 | 47 | ||
48 | atomic_t vma_ref; | 48 | atomic_t vma_ref; |
49 | 49 | ||
50 | struct mutex write_lock; | 50 | struct nvgpu_mutex write_lock; |
51 | }; | 51 | }; |
52 | 52 | ||
53 | 53 | ||
@@ -83,16 +83,16 @@ ssize_t gk20a_ctxsw_dev_read(struct file *filp, char __user *buf, size_t size, | |||
83 | gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, | 83 | gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, |
84 | "filp=%p buf=%p size=%zu", filp, buf, size); | 84 | "filp=%p buf=%p size=%zu", filp, buf, size); |
85 | 85 | ||
86 | mutex_lock(&dev->write_lock); | 86 | nvgpu_mutex_acquire(&dev->write_lock); |
87 | while (ring_is_empty(hdr)) { | 87 | while (ring_is_empty(hdr)) { |
88 | mutex_unlock(&dev->write_lock); | 88 | nvgpu_mutex_release(&dev->write_lock); |
89 | if (filp->f_flags & O_NONBLOCK) | 89 | if (filp->f_flags & O_NONBLOCK) |
90 | return -EAGAIN; | 90 | return -EAGAIN; |
91 | err = wait_event_interruptible(dev->readout_wq, | 91 | err = wait_event_interruptible(dev->readout_wq, |
92 | !ring_is_empty(hdr)); | 92 | !ring_is_empty(hdr)); |
93 | if (err) | 93 | if (err) |
94 | return err; | 94 | return err; |
95 | mutex_lock(&dev->write_lock); | 95 | nvgpu_mutex_acquire(&dev->write_lock); |
96 | } | 96 | } |
97 | 97 | ||
98 | while (size >= sizeof(struct nvgpu_ctxsw_trace_entry)) { | 98 | while (size >= sizeof(struct nvgpu_ctxsw_trace_entry)) { |
@@ -101,7 +101,7 @@ ssize_t gk20a_ctxsw_dev_read(struct file *filp, char __user *buf, size_t size, | |||
101 | 101 | ||
102 | if (copy_to_user(entry, &dev->ents[hdr->read_idx], | 102 | if (copy_to_user(entry, &dev->ents[hdr->read_idx], |
103 | sizeof(*entry))) { | 103 | sizeof(*entry))) { |
104 | mutex_unlock(&dev->write_lock); | 104 | nvgpu_mutex_release(&dev->write_lock); |
105 | return -EFAULT; | 105 | return -EFAULT; |
106 | } | 106 | } |
107 | 107 | ||
@@ -118,7 +118,7 @@ ssize_t gk20a_ctxsw_dev_read(struct file *filp, char __user *buf, size_t size, | |||
118 | hdr->read_idx); | 118 | hdr->read_idx); |
119 | 119 | ||
120 | *off = hdr->read_idx; | 120 | *off = hdr->read_idx; |
121 | mutex_unlock(&dev->write_lock); | 121 | nvgpu_mutex_release(&dev->write_lock); |
122 | 122 | ||
123 | return copied; | 123 | return copied; |
124 | } | 124 | } |
@@ -126,9 +126,9 @@ ssize_t gk20a_ctxsw_dev_read(struct file *filp, char __user *buf, size_t size, | |||
126 | static int gk20a_ctxsw_dev_ioctl_trace_enable(struct gk20a_ctxsw_dev *dev) | 126 | static int gk20a_ctxsw_dev_ioctl_trace_enable(struct gk20a_ctxsw_dev *dev) |
127 | { | 127 | { |
128 | gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "trace enabled"); | 128 | gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "trace enabled"); |
129 | mutex_lock(&dev->write_lock); | 129 | nvgpu_mutex_acquire(&dev->write_lock); |
130 | dev->write_enabled = true; | 130 | dev->write_enabled = true; |
131 | mutex_unlock(&dev->write_lock); | 131 | nvgpu_mutex_release(&dev->write_lock); |
132 | dev->g->ops.fecs_trace.enable(dev->g); | 132 | dev->g->ops.fecs_trace.enable(dev->g); |
133 | return 0; | 133 | return 0; |
134 | } | 134 | } |
@@ -137,9 +137,9 @@ static int gk20a_ctxsw_dev_ioctl_trace_disable(struct gk20a_ctxsw_dev *dev) | |||
137 | { | 137 | { |
138 | gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "trace disabled"); | 138 | gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "trace disabled"); |
139 | dev->g->ops.fecs_trace.disable(dev->g); | 139 | dev->g->ops.fecs_trace.disable(dev->g); |
140 | mutex_lock(&dev->write_lock); | 140 | nvgpu_mutex_acquire(&dev->write_lock); |
141 | dev->write_enabled = false; | 141 | dev->write_enabled = false; |
142 | mutex_unlock(&dev->write_lock); | 142 | nvgpu_mutex_release(&dev->write_lock); |
143 | return 0; | 143 | return 0; |
144 | } | 144 | } |
145 | 145 | ||
@@ -211,9 +211,9 @@ static int gk20a_ctxsw_dev_ioctl_ring_setup(struct gk20a_ctxsw_dev *dev, | |||
211 | if (size > GK20A_CTXSW_TRACE_MAX_VM_RING_SIZE) | 211 | if (size > GK20A_CTXSW_TRACE_MAX_VM_RING_SIZE) |
212 | return -EINVAL; | 212 | return -EINVAL; |
213 | 213 | ||
214 | mutex_lock(&dev->write_lock); | 214 | nvgpu_mutex_acquire(&dev->write_lock); |
215 | ret = gk20a_ctxsw_dev_alloc_buffer(dev, size); | 215 | ret = gk20a_ctxsw_dev_alloc_buffer(dev, size); |
216 | mutex_unlock(&dev->write_lock); | 216 | nvgpu_mutex_release(&dev->write_lock); |
217 | 217 | ||
218 | return ret; | 218 | return ret; |
219 | } | 219 | } |
@@ -223,9 +223,9 @@ static int gk20a_ctxsw_dev_ioctl_set_filter(struct gk20a_ctxsw_dev *dev, | |||
223 | { | 223 | { |
224 | struct gk20a *g = dev->g; | 224 | struct gk20a *g = dev->g; |
225 | 225 | ||
226 | mutex_lock(&dev->write_lock); | 226 | nvgpu_mutex_acquire(&dev->write_lock); |
227 | dev->filter = args->filter; | 227 | dev->filter = args->filter; |
228 | mutex_unlock(&dev->write_lock); | 228 | nvgpu_mutex_release(&dev->write_lock); |
229 | 229 | ||
230 | if (g->ops.fecs_trace.set_filter) | 230 | if (g->ops.fecs_trace.set_filter) |
231 | g->ops.fecs_trace.set_filter(g, &dev->filter); | 231 | g->ops.fecs_trace.set_filter(g, &dev->filter); |
@@ -235,9 +235,9 @@ static int gk20a_ctxsw_dev_ioctl_set_filter(struct gk20a_ctxsw_dev *dev, | |||
235 | static int gk20a_ctxsw_dev_ioctl_get_filter(struct gk20a_ctxsw_dev *dev, | 235 | static int gk20a_ctxsw_dev_ioctl_get_filter(struct gk20a_ctxsw_dev *dev, |
236 | struct nvgpu_ctxsw_trace_filter_args *args) | 236 | struct nvgpu_ctxsw_trace_filter_args *args) |
237 | { | 237 | { |
238 | mutex_lock(&dev->write_lock); | 238 | nvgpu_mutex_acquire(&dev->write_lock); |
239 | args->filter = dev->filter; | 239 | args->filter = dev->filter; |
240 | mutex_unlock(&dev->write_lock); | 240 | nvgpu_mutex_release(&dev->write_lock); |
241 | 241 | ||
242 | return 0; | 242 | return 0; |
243 | } | 243 | } |
@@ -293,7 +293,7 @@ int gk20a_ctxsw_dev_open(struct inode *inode, struct file *filp) | |||
293 | 293 | ||
294 | /* Allow only one user for this device */ | 294 | /* Allow only one user for this device */ |
295 | dev = &trace->devs[vmid]; | 295 | dev = &trace->devs[vmid]; |
296 | mutex_lock(&dev->write_lock); | 296 | nvgpu_mutex_acquire(&dev->write_lock); |
297 | if (dev->hdr) { | 297 | if (dev->hdr) { |
298 | err = -EBUSY; | 298 | err = -EBUSY; |
299 | goto done; | 299 | goto done; |
@@ -321,7 +321,7 @@ int gk20a_ctxsw_dev_open(struct inode *inode, struct file *filp) | |||
321 | } | 321 | } |
322 | 322 | ||
323 | done: | 323 | done: |
324 | mutex_unlock(&dev->write_lock); | 324 | nvgpu_mutex_release(&dev->write_lock); |
325 | 325 | ||
326 | idle: | 326 | idle: |
327 | gk20a_idle(g->dev); | 327 | gk20a_idle(g->dev); |
@@ -338,9 +338,9 @@ int gk20a_ctxsw_dev_release(struct inode *inode, struct file *filp) | |||
338 | 338 | ||
339 | g->ops.fecs_trace.disable(g); | 339 | g->ops.fecs_trace.disable(g); |
340 | 340 | ||
341 | mutex_lock(&dev->write_lock); | 341 | nvgpu_mutex_acquire(&dev->write_lock); |
342 | dev->write_enabled = false; | 342 | dev->write_enabled = false; |
343 | mutex_unlock(&dev->write_lock); | 343 | nvgpu_mutex_release(&dev->write_lock); |
344 | 344 | ||
345 | if (dev->hdr) { | 345 | if (dev->hdr) { |
346 | dev->g->ops.fecs_trace.free_user_buffer(dev->g); | 346 | dev->g->ops.fecs_trace.free_user_buffer(dev->g); |
@@ -414,11 +414,11 @@ unsigned int gk20a_ctxsw_dev_poll(struct file *filp, poll_table *wait) | |||
414 | 414 | ||
415 | gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, ""); | 415 | gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, ""); |
416 | 416 | ||
417 | mutex_lock(&dev->write_lock); | 417 | nvgpu_mutex_acquire(&dev->write_lock); |
418 | poll_wait(filp, &dev->readout_wq, wait); | 418 | poll_wait(filp, &dev->readout_wq, wait); |
419 | if (!ring_is_empty(hdr)) | 419 | if (!ring_is_empty(hdr)) |
420 | mask |= POLLIN | POLLRDNORM; | 420 | mask |= POLLIN | POLLRDNORM; |
421 | mutex_unlock(&dev->write_lock); | 421 | nvgpu_mutex_release(&dev->write_lock); |
422 | 422 | ||
423 | return mask; | 423 | return mask; |
424 | } | 424 | } |
@@ -482,7 +482,7 @@ static int gk20a_ctxsw_init_devs(struct gk20a *g) | |||
482 | dev->hdr = NULL; | 482 | dev->hdr = NULL; |
483 | dev->write_enabled = false; | 483 | dev->write_enabled = false; |
484 | init_waitqueue_head(&dev->readout_wq); | 484 | init_waitqueue_head(&dev->readout_wq); |
485 | mutex_init(&dev->write_lock); | 485 | nvgpu_mutex_init(&dev->write_lock); |
486 | atomic_set(&dev->vma_ref, 0); | 486 | atomic_set(&dev->vma_ref, 0); |
487 | dev++; | 487 | dev++; |
488 | } | 488 | } |
@@ -567,7 +567,7 @@ int gk20a_ctxsw_trace_write(struct gk20a *g, | |||
567 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, | 567 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, |
568 | "dev=%p hdr=%p", dev, hdr); | 568 | "dev=%p hdr=%p", dev, hdr); |
569 | 569 | ||
570 | mutex_lock(&dev->write_lock); | 570 | nvgpu_mutex_acquire(&dev->write_lock); |
571 | 571 | ||
572 | if (unlikely(!hdr)) { | 572 | if (unlikely(!hdr)) { |
573 | /* device has been released */ | 573 | /* device has been released */ |
@@ -621,7 +621,7 @@ int gk20a_ctxsw_trace_write(struct gk20a *g, | |||
621 | gk20a_dbg(gpu_dbg_ctxsw, "added: read=%d write=%d len=%d", | 621 | gk20a_dbg(gpu_dbg_ctxsw, "added: read=%d write=%d len=%d", |
622 | hdr->read_idx, hdr->write_idx, ring_len(hdr)); | 622 | hdr->read_idx, hdr->write_idx, ring_len(hdr)); |
623 | 623 | ||
624 | mutex_unlock(&dev->write_lock); | 624 | nvgpu_mutex_release(&dev->write_lock); |
625 | return ret; | 625 | return ret; |
626 | 626 | ||
627 | disable: | 627 | disable: |
@@ -638,7 +638,7 @@ filter: | |||
638 | entry->tag, entry->timestamp, reason); | 638 | entry->tag, entry->timestamp, reason); |
639 | 639 | ||
640 | done: | 640 | done: |
641 | mutex_unlock(&dev->write_lock); | 641 | nvgpu_mutex_release(&dev->write_lock); |
642 | return ret; | 642 | return ret; |
643 | } | 643 | } |
644 | 644 | ||
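gk20a_ctxsw_dev_read keeps its pre-existing discipline through the rename: write_lock is dropped before sleeping in wait_event_interruptible and re-taken afterwards, so a blocked reader never holds the lock the tracer's writer side needs. The skeleton of that loop, with the ring bookkeeping elided (demo_ring_empty stands in for ring_is_empty(hdr) and is not a driver function):

#include <linux/wait.h>
#include <nvgpu/lock.h>

bool demo_ring_empty(struct gk20a_ctxsw_dev *dev);

static int demo_wait_for_data(struct gk20a_ctxsw_dev *dev)
{
        int err;

        nvgpu_mutex_acquire(&dev->write_lock);
        while (demo_ring_empty(dev)) {
                /* never sleep with write_lock held: gk20a_ctxsw_trace_write
                 * needs it to make progress and wake us */
                nvgpu_mutex_release(&dev->write_lock);
                err = wait_event_interruptible(dev->readout_wq,
                                               !demo_ring_empty(dev));
                if (err)
                        return err;
                nvgpu_mutex_acquire(&dev->write_lock);
        }
        /* ... copy entries out ... */
        nvgpu_mutex_release(&dev->write_lock);
        return 0;
}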
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c index ac11e378..f6290e1d 100644 --- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c | |||
@@ -44,9 +44,9 @@ nvgpu_dbg_gpu_get_session_channel(struct dbg_session_gk20a *dbg_s) | |||
44 | struct channel_gk20a *ch; | 44 | struct channel_gk20a *ch; |
45 | struct gk20a *g = dbg_s->g; | 45 | struct gk20a *g = dbg_s->g; |
46 | 46 | ||
47 | mutex_lock(&dbg_s->ch_list_lock); | 47 | nvgpu_mutex_acquire(&dbg_s->ch_list_lock); |
48 | if (list_empty(&dbg_s->ch_list)) { | 48 | if (list_empty(&dbg_s->ch_list)) { |
49 | mutex_unlock(&dbg_s->ch_list_lock); | 49 | nvgpu_mutex_release(&dbg_s->ch_list_lock); |
50 | return NULL; | 50 | return NULL; |
51 | } | 51 | } |
52 | 52 | ||
@@ -55,7 +55,7 @@ nvgpu_dbg_gpu_get_session_channel(struct dbg_session_gk20a *dbg_s) | |||
55 | ch_entry); | 55 | ch_entry); |
56 | ch = g->fifo.channel + ch_data->chid; | 56 | ch = g->fifo.channel + ch_data->chid; |
57 | 57 | ||
58 | mutex_unlock(&dbg_s->ch_list_lock); | 58 | nvgpu_mutex_release(&dbg_s->ch_list_lock); |
59 | 59 | ||
60 | return ch; | 60 | return ch; |
61 | } | 61 | } |
@@ -116,8 +116,8 @@ static int gk20a_dbg_gpu_do_dev_open(struct inode *inode, | |||
116 | 116 | ||
117 | init_waitqueue_head(&dbg_session->dbg_events.wait_queue); | 117 | init_waitqueue_head(&dbg_session->dbg_events.wait_queue); |
118 | INIT_LIST_HEAD(&dbg_session->ch_list); | 118 | INIT_LIST_HEAD(&dbg_session->ch_list); |
119 | mutex_init(&dbg_session->ch_list_lock); | 119 | nvgpu_mutex_init(&dbg_session->ch_list_lock); |
120 | mutex_init(&dbg_session->ioctl_lock); | 120 | nvgpu_mutex_init(&dbg_session->ioctl_lock); |
121 | dbg_session->dbg_events.events_enabled = false; | 121 | dbg_session->dbg_events.events_enabled = false; |
122 | dbg_session->dbg_events.num_pending_events = 0; | 122 | dbg_session->dbg_events.num_pending_events = 0; |
123 | 123 | ||
@@ -127,61 +127,61 @@ static int gk20a_dbg_gpu_do_dev_open(struct inode *inode, | |||
127 | /* used in scenarios where the debugger session can take just the inter-session | 127 | /* used in scenarios where the debugger session can take just the inter-session |
128 | * lock for performance, but the profiler session must take the per-gpu lock | 128 | * lock for performance, but the profiler session must take the per-gpu lock |
129 | * since it might not have an associated channel. */ | 129 | * since it might not have an associated channel. */ |
130 | static void gk20a_dbg_session_mutex_lock(struct dbg_session_gk20a *dbg_s) | 130 | static void gk20a_dbg_session_nvgpu_mutex_acquire(struct dbg_session_gk20a *dbg_s) |
131 | { | 131 | { |
132 | struct channel_gk20a *ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); | 132 | struct channel_gk20a *ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); |
133 | 133 | ||
134 | if (dbg_s->is_profiler || !ch) | 134 | if (dbg_s->is_profiler || !ch) |
135 | mutex_lock(&dbg_s->g->dbg_sessions_lock); | 135 | nvgpu_mutex_acquire(&dbg_s->g->dbg_sessions_lock); |
136 | else | 136 | else |
137 | mutex_lock(&ch->dbg_s_lock); | 137 | nvgpu_mutex_acquire(&ch->dbg_s_lock); |
138 | } | 138 | } |
139 | 139 | ||
140 | static void gk20a_dbg_session_mutex_unlock(struct dbg_session_gk20a *dbg_s) | 140 | static void gk20a_dbg_session_nvgpu_mutex_release(struct dbg_session_gk20a *dbg_s) |
141 | { | 141 | { |
142 | struct channel_gk20a *ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); | 142 | struct channel_gk20a *ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); |
143 | 143 | ||
144 | if (dbg_s->is_profiler || !ch) | 144 | if (dbg_s->is_profiler || !ch) |
145 | mutex_unlock(&dbg_s->g->dbg_sessions_lock); | 145 | nvgpu_mutex_release(&dbg_s->g->dbg_sessions_lock); |
146 | else | 146 | else |
147 | mutex_unlock(&ch->dbg_s_lock); | 147 | nvgpu_mutex_release(&ch->dbg_s_lock); |
148 | } | 148 | } |
149 | 149 | ||
150 | static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s) | 150 | static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s) |
151 | { | 151 | { |
152 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); | 152 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); |
153 | 153 | ||
154 | gk20a_dbg_session_mutex_lock(dbg_s); | 154 | gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s); |
155 | 155 | ||
156 | dbg_s->dbg_events.events_enabled = true; | 156 | dbg_s->dbg_events.events_enabled = true; |
157 | dbg_s->dbg_events.num_pending_events = 0; | 157 | dbg_s->dbg_events.num_pending_events = 0; |
158 | 158 | ||
159 | gk20a_dbg_session_mutex_unlock(dbg_s); | 159 | gk20a_dbg_session_nvgpu_mutex_release(dbg_s); |
160 | } | 160 | } |
161 | 161 | ||
162 | static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s) | 162 | static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s) |
163 | { | 163 | { |
164 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); | 164 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); |
165 | 165 | ||
166 | gk20a_dbg_session_mutex_lock(dbg_s); | 166 | gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s); |
167 | 167 | ||
168 | dbg_s->dbg_events.events_enabled = false; | 168 | dbg_s->dbg_events.events_enabled = false; |
169 | dbg_s->dbg_events.num_pending_events = 0; | 169 | dbg_s->dbg_events.num_pending_events = 0; |
170 | 170 | ||
171 | gk20a_dbg_session_mutex_unlock(dbg_s); | 171 | gk20a_dbg_session_nvgpu_mutex_release(dbg_s); |
172 | } | 172 | } |
173 | 173 | ||
174 | static void gk20a_dbg_gpu_events_clear(struct dbg_session_gk20a *dbg_s) | 174 | static void gk20a_dbg_gpu_events_clear(struct dbg_session_gk20a *dbg_s) |
175 | { | 175 | { |
176 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); | 176 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); |
177 | 177 | ||
178 | gk20a_dbg_session_mutex_lock(dbg_s); | 178 | gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s); |
179 | 179 | ||
180 | if (dbg_s->dbg_events.events_enabled && | 180 | if (dbg_s->dbg_events.events_enabled && |
181 | dbg_s->dbg_events.num_pending_events > 0) | 181 | dbg_s->dbg_events.num_pending_events > 0) |
182 | dbg_s->dbg_events.num_pending_events--; | 182 | dbg_s->dbg_events.num_pending_events--; |
183 | 183 | ||
184 | gk20a_dbg_session_mutex_unlock(dbg_s); | 184 | gk20a_dbg_session_nvgpu_mutex_release(dbg_s); |
185 | } | 185 | } |
186 | 186 | ||
187 | static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s, | 187 | static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s, |
@@ -232,7 +232,7 @@ unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait) | |||
232 | 232 | ||
233 | poll_wait(filep, &dbg_s->dbg_events.wait_queue, wait); | 233 | poll_wait(filep, &dbg_s->dbg_events.wait_queue, wait); |
234 | 234 | ||
235 | gk20a_dbg_session_mutex_lock(dbg_s); | 235 | gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s); |
236 | 236 | ||
237 | if (dbg_s->dbg_events.events_enabled && | 237 | if (dbg_s->dbg_events.events_enabled && |
238 | dbg_s->dbg_events.num_pending_events > 0) { | 238 | dbg_s->dbg_events.num_pending_events > 0) { |
@@ -243,7 +243,7 @@ unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait) | |||
243 | mask = (POLLPRI | POLLIN); | 243 | mask = (POLLPRI | POLLIN); |
244 | } | 244 | } |
245 | 245 | ||
246 | gk20a_dbg_session_mutex_unlock(dbg_s); | 246 | gk20a_dbg_session_nvgpu_mutex_release(dbg_s); |
247 | 247 | ||
248 | return mask; | 248 | return mask; |
249 | } | 249 | } |
@@ -268,7 +268,7 @@ void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch) | |||
268 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); | 268 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); |
269 | 269 | ||
270 | /* guard against the session list being modified */ | 270 | /* guard against the session list being modified */ |
271 | mutex_lock(&ch->dbg_s_lock); | 271 | nvgpu_mutex_acquire(&ch->dbg_s_lock); |
272 | 272 | ||
273 | list_for_each_entry(session_data, &ch->dbg_s_list, dbg_s_entry) { | 273 | list_for_each_entry(session_data, &ch->dbg_s_list, dbg_s_entry) { |
274 | dbg_s = session_data->dbg_s; | 274 | dbg_s = session_data->dbg_s; |
@@ -284,7 +284,7 @@ void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch) | |||
284 | } | 284 | } |
285 | } | 285 | } |
286 | 286 | ||
287 | mutex_unlock(&ch->dbg_s_lock); | 287 | nvgpu_mutex_release(&ch->dbg_s_lock); |
288 | } | 288 | } |
289 | 289 | ||
290 | bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch) | 290 | bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch) |
@@ -296,7 +296,7 @@ bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch) | |||
296 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); | 296 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); |
297 | 297 | ||
298 | /* guard against the session list being modified */ | 298 | /* guard against the session list being modified */ |
299 | mutex_lock(&ch->dbg_s_lock); | 299 | nvgpu_mutex_acquire(&ch->dbg_s_lock); |
300 | 300 | ||
301 | list_for_each_entry(session_data, &ch->dbg_s_list, dbg_s_entry) { | 301 | list_for_each_entry(session_data, &ch->dbg_s_list, dbg_s_entry) { |
302 | dbg_s = session_data->dbg_s; | 302 | dbg_s = session_data->dbg_s; |
@@ -308,7 +308,7 @@ bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch) | |||
308 | } | 308 | } |
309 | } | 309 | } |
310 | 310 | ||
311 | mutex_unlock(&ch->dbg_s_lock); | 311 | nvgpu_mutex_release(&ch->dbg_s_lock); |
312 | 312 | ||
313 | return broadcast; | 313 | return broadcast; |
314 | } | 314 | } |
@@ -321,7 +321,7 @@ int gk20a_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch) | |||
321 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); | 321 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); |
322 | 322 | ||
323 | /* guard against the session list being modified */ | 323 | /* guard against the session list being modified */ |
324 | mutex_lock(&ch->dbg_s_lock); | 324 | nvgpu_mutex_acquire(&ch->dbg_s_lock); |
325 | 325 | ||
326 | list_for_each_entry(session_data, &ch->dbg_s_list, dbg_s_entry) { | 326 | list_for_each_entry(session_data, &ch->dbg_s_list, dbg_s_entry) { |
327 | dbg_s = session_data->dbg_s; | 327 | dbg_s = session_data->dbg_s; |
@@ -332,7 +332,7 @@ int gk20a_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch) | |||
332 | } | 332 | } |
333 | } | 333 | } |
334 | 334 | ||
335 | mutex_unlock(&ch->dbg_s_lock); | 335 | nvgpu_mutex_release(&ch->dbg_s_lock); |
336 | 336 | ||
337 | return 0; | 337 | return 0; |
338 | } | 338 | } |
@@ -407,12 +407,12 @@ static int dbg_unbind_all_channels_gk20a(struct dbg_session_gk20a *dbg_s) | |||
407 | struct dbg_session_channel_data *ch_data, *tmp; | 407 | struct dbg_session_channel_data *ch_data, *tmp; |
408 | struct gk20a *g = dbg_s->g; | 408 | struct gk20a *g = dbg_s->g; |
409 | 409 | ||
410 | mutex_lock(&g->dbg_sessions_lock); | 410 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
411 | mutex_lock(&dbg_s->ch_list_lock); | 411 | nvgpu_mutex_acquire(&dbg_s->ch_list_lock); |
412 | list_for_each_entry_safe(ch_data, tmp, &dbg_s->ch_list, ch_entry) | 412 | list_for_each_entry_safe(ch_data, tmp, &dbg_s->ch_list, ch_entry) |
413 | dbg_unbind_single_channel_gk20a(dbg_s, ch_data); | 413 | dbg_unbind_single_channel_gk20a(dbg_s, ch_data); |
414 | mutex_unlock(&dbg_s->ch_list_lock); | 414 | nvgpu_mutex_release(&dbg_s->ch_list_lock); |
415 | mutex_unlock(&g->dbg_sessions_lock); | 415 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
416 | 416 | ||
417 | return 0; | 417 | return 0; |
418 | } | 418 | } |
@@ -435,25 +435,25 @@ static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s, | |||
435 | return -EINVAL; | 435 | return -EINVAL; |
436 | } | 436 | } |
437 | 437 | ||
438 | mutex_lock(&dbg_s->ch_list_lock); | 438 | nvgpu_mutex_acquire(&dbg_s->ch_list_lock); |
439 | list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry) { | 439 | list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry) { |
440 | if (ch->hw_chid == ch_data->chid) { | 440 | if (ch->hw_chid == ch_data->chid) { |
441 | channel_found = true; | 441 | channel_found = true; |
442 | break; | 442 | break; |
443 | } | 443 | } |
444 | } | 444 | } |
445 | mutex_unlock(&dbg_s->ch_list_lock); | 445 | nvgpu_mutex_release(&dbg_s->ch_list_lock); |
446 | 446 | ||
447 | if (!channel_found) { | 447 | if (!channel_found) { |
448 | gk20a_dbg_fn("channel not bound, fd=%d\n", args->channel_fd); | 448 | gk20a_dbg_fn("channel not bound, fd=%d\n", args->channel_fd); |
449 | return -EINVAL; | 449 | return -EINVAL; |
450 | } | 450 | } |
451 | 451 | ||
452 | mutex_lock(&g->dbg_sessions_lock); | 452 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
453 | mutex_lock(&dbg_s->ch_list_lock); | 453 | nvgpu_mutex_acquire(&dbg_s->ch_list_lock); |
454 | err = dbg_unbind_single_channel_gk20a(dbg_s, ch_data); | 454 | err = dbg_unbind_single_channel_gk20a(dbg_s, ch_data); |
455 | mutex_unlock(&dbg_s->ch_list_lock); | 455 | nvgpu_mutex_release(&dbg_s->ch_list_lock); |
456 | mutex_unlock(&g->dbg_sessions_lock); | 456 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
457 | 457 | ||
458 | return err; | 458 | return err; |
459 | } | 459 | } |
@@ -472,11 +472,11 @@ int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp) | |||
472 | * which called powergate/timeout disable ioctl, to be killed without | 472 | * which called powergate/timeout disable ioctl, to be killed without |
473 | * calling powergate/timeout enable ioctl | 473 | * calling powergate/timeout enable ioctl |
474 | */ | 474 | */ |
475 | mutex_lock(&g->dbg_sessions_lock); | 475 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
476 | g->ops.dbg_session_ops.dbg_set_powergate(dbg_s, | 476 | g->ops.dbg_session_ops.dbg_set_powergate(dbg_s, |
477 | NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE); | 477 | NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE); |
478 | nvgpu_dbg_timeout_enable(dbg_s, NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE); | 478 | nvgpu_dbg_timeout_enable(dbg_s, NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE); |
479 | mutex_unlock(&g->dbg_sessions_lock); | 479 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
480 | 480 | ||
481 | kfree(dbg_s); | 481 | kfree(dbg_s); |
482 | return 0; | 482 | return 0; |
@@ -510,8 +510,8 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s, | |||
510 | 510 | ||
511 | gk20a_dbg_fn("%s hwchid=%d", dev_name(dbg_s->dev), ch->hw_chid); | 511 | gk20a_dbg_fn("%s hwchid=%d", dev_name(dbg_s->dev), ch->hw_chid); |
512 | 512 | ||
513 | mutex_lock(&g->dbg_sessions_lock); | 513 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
514 | mutex_lock(&ch->dbg_s_lock); | 514 | nvgpu_mutex_acquire(&ch->dbg_s_lock); |
515 | 515 | ||
516 | ch_data = kzalloc(sizeof(*ch_data), GFP_KERNEL); | 516 | ch_data = kzalloc(sizeof(*ch_data), GFP_KERNEL); |
517 | if (!ch_data) { | 517 | if (!ch_data) { |
@@ -535,12 +535,12 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s, | |||
535 | 535 | ||
536 | list_add(&session_data->dbg_s_entry, &ch->dbg_s_list); | 536 | list_add(&session_data->dbg_s_entry, &ch->dbg_s_list); |
537 | 537 | ||
538 | mutex_lock(&dbg_s->ch_list_lock); | 538 | nvgpu_mutex_acquire(&dbg_s->ch_list_lock); |
539 | list_add_tail(&ch_data->ch_entry, &dbg_s->ch_list); | 539 | list_add_tail(&ch_data->ch_entry, &dbg_s->ch_list); |
540 | mutex_unlock(&dbg_s->ch_list_lock); | 540 | nvgpu_mutex_release(&dbg_s->ch_list_lock); |
541 | 541 | ||
542 | mutex_unlock(&ch->dbg_s_lock); | 542 | nvgpu_mutex_release(&ch->dbg_s_lock); |
543 | mutex_unlock(&g->dbg_sessions_lock); | 543 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
544 | 544 | ||
545 | return 0; | 545 | return 0; |
546 | } | 546 | } |
@@ -591,9 +591,9 @@ static int nvgpu_dbg_gpu_ioctl_timeout(struct dbg_session_gk20a *dbg_s, | |||
591 | 591 | ||
592 | gk20a_dbg_fn("powergate mode = %d", args->enable); | 592 | gk20a_dbg_fn("powergate mode = %d", args->enable); |
593 | 593 | ||
594 | mutex_lock(&g->dbg_sessions_lock); | 594 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
595 | err = nvgpu_dbg_timeout_enable(dbg_s, args->enable); | 595 | err = nvgpu_dbg_timeout_enable(dbg_s, args->enable); |
596 | mutex_unlock(&g->dbg_sessions_lock); | 596 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
597 | 597 | ||
598 | return err; | 598 | return err; |
599 | } | 599 | } |
@@ -604,9 +604,9 @@ static void nvgpu_dbg_gpu_ioctl_get_timeout(struct dbg_session_gk20a *dbg_s, | |||
604 | int status; | 604 | int status; |
605 | struct gk20a *g = get_gk20a(dbg_s->dev); | 605 | struct gk20a *g = get_gk20a(dbg_s->dev); |
606 | 606 | ||
607 | mutex_lock(&g->dbg_sessions_lock); | 607 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
608 | status = g->timeouts_enabled; | 608 | status = g->timeouts_enabled; |
609 | mutex_unlock(&g->dbg_sessions_lock); | 609 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
610 | 610 | ||
611 | if (status) | 611 | if (status) |
612 | args->enable = NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE; | 612 | args->enable = NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE; |
@@ -620,11 +620,11 @@ static int nvgpu_dbg_gpu_ioctl_set_next_stop_trigger_type( | |||
620 | { | 620 | { |
621 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); | 621 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); |
622 | 622 | ||
623 | gk20a_dbg_session_mutex_lock(dbg_s); | 623 | gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s); |
624 | 624 | ||
625 | dbg_s->broadcast_stop_trigger = (args->broadcast != 0); | 625 | dbg_s->broadcast_stop_trigger = (args->broadcast != 0); |
626 | 626 | ||
627 | gk20a_dbg_session_mutex_unlock(dbg_s); | 627 | gk20a_dbg_session_nvgpu_mutex_release(dbg_s); |
628 | 628 | ||
629 | return 0; | 629 | return 0; |
630 | } | 630 | } |
@@ -651,12 +651,12 @@ static int nvgpu_dbg_gpu_ioctl_read_single_sm_error_state( | |||
651 | if (write_size > args->sm_error_state_record_size) | 651 | if (write_size > args->sm_error_state_record_size) |
652 | write_size = args->sm_error_state_record_size; | 652 | write_size = args->sm_error_state_record_size; |
653 | 653 | ||
654 | mutex_lock(&g->dbg_sessions_lock); | 654 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
655 | err = copy_to_user((void __user *)(uintptr_t) | 655 | err = copy_to_user((void __user *)(uintptr_t) |
656 | args->sm_error_state_record_mem, | 656 | args->sm_error_state_record_mem, |
657 | sm_error_state, | 657 | sm_error_state, |
658 | write_size); | 658 | write_size); |
659 | mutex_unlock(&g->dbg_sessions_lock); | 659 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
660 | if (err) { | 660 | if (err) { |
661 | gk20a_err(dev_from_gk20a(g), "copy_to_user failed!\n"); | 661 | gk20a_err(dev_from_gk20a(g), "copy_to_user failed!\n"); |
662 | return err; | 662 | return err; |
@@ -728,12 +728,12 @@ static int nvgpu_dbg_gpu_ioctl_write_single_sm_error_state( | |||
728 | if (read_size > args->sm_error_state_record_size) | 728 | if (read_size > args->sm_error_state_record_size) |
729 | read_size = args->sm_error_state_record_size; | 729 | read_size = args->sm_error_state_record_size; |
730 | 730 | ||
731 | mutex_lock(&g->dbg_sessions_lock); | 731 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
732 | err = copy_from_user(sm_error_state, | 732 | err = copy_from_user(sm_error_state, |
733 | (void __user *)(uintptr_t) | 733 | (void __user *)(uintptr_t) |
734 | args->sm_error_state_record_mem, | 734 | args->sm_error_state_record_mem, |
735 | read_size); | 735 | read_size); |
736 | mutex_unlock(&g->dbg_sessions_lock); | 736 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
737 | if (err) { | 737 | if (err) { |
738 | err = -ENOMEM; | 738 | err = -ENOMEM; |
739 | goto err_free; | 739 | goto err_free; |
@@ -901,7 +901,7 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd, | |||
901 | } | 901 | } |
902 | 902 | ||
903 | /* protect from threaded user space calls */ | 903 | /* protect from threaded user space calls */ |
904 | mutex_lock(&dbg_s->ioctl_lock); | 904 | nvgpu_mutex_acquire(&dbg_s->ioctl_lock); |
905 | 905 | ||
906 | switch (cmd) { | 906 | switch (cmd) { |
907 | case NVGPU_DBG_GPU_IOCTL_BIND_CHANNEL: | 907 | case NVGPU_DBG_GPU_IOCTL_BIND_CHANNEL: |
@@ -1007,7 +1007,7 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd, | |||
1007 | break; | 1007 | break; |
1008 | } | 1008 | } |
1009 | 1009 | ||
1010 | mutex_unlock(&dbg_s->ioctl_lock); | 1010 | nvgpu_mutex_release(&dbg_s->ioctl_lock); |
1011 | 1011 | ||
1012 | gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err); | 1012 | gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err); |
1013 | 1013 | ||
@@ -1032,9 +1032,9 @@ static bool gr_context_info_available(struct dbg_session_gk20a *dbg_s, | |||
1032 | { | 1032 | { |
1033 | int err; | 1033 | int err; |
1034 | 1034 | ||
1035 | mutex_lock(&gr->ctx_mutex); | 1035 | nvgpu_mutex_acquire(&gr->ctx_mutex); |
1036 | err = !gr->ctx_vars.golden_image_initialized; | 1036 | err = !gr->ctx_vars.golden_image_initialized; |
1037 | mutex_unlock(&gr->ctx_mutex); | 1037 | nvgpu_mutex_release(&gr->ctx_mutex); |
1038 | if (err) | 1038 | if (err) |
1039 | return false; | 1039 | return false; |
1040 | return true; | 1040 | return true; |
@@ -1089,7 +1089,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s, | |||
1089 | /* since exec_reg_ops sends methods to the ucode, it must take the | 1089 | /* since exec_reg_ops sends methods to the ucode, it must take the |
1090 | * global gpu lock to protect against mixing methods from debug sessions | 1090 | * global gpu lock to protect against mixing methods from debug sessions |
1091 | * on other channels */ | 1091 | * on other channels */ |
1092 | mutex_lock(&g->dbg_sessions_lock); | 1092 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
1093 | 1093 | ||
1094 | if (!dbg_s->is_pg_disabled && !gk20a_gpu_is_virtual(dbg_s->dev)) { | 1094 | if (!dbg_s->is_pg_disabled && !gk20a_gpu_is_virtual(dbg_s->dev)) { |
1095 | /* In the virtual case, the server will handle | 1095 | /* In the virtual case, the server will handle |
@@ -1150,7 +1150,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s, | |||
1150 | } | 1150 | } |
1151 | } | 1151 | } |
1152 | 1152 | ||
1153 | mutex_unlock(&g->dbg_sessions_lock); | 1153 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
1154 | 1154 | ||
1155 | if (!err && powergate_err) | 1155 | if (!err && powergate_err) |
1156 | err = powergate_err; | 1156 | err = powergate_err; |
@@ -1276,9 +1276,9 @@ static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s, | |||
1276 | gk20a_dbg_fn("%s powergate mode = %d", | 1276 | gk20a_dbg_fn("%s powergate mode = %d", |
1277 | dev_name(dbg_s->dev), args->mode); | 1277 | dev_name(dbg_s->dev), args->mode); |
1278 | 1278 | ||
1279 | mutex_lock(&g->dbg_sessions_lock); | 1279 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
1280 | err = g->ops.dbg_session_ops.dbg_set_powergate(dbg_s, args->mode); | 1280 | err = g->ops.dbg_session_ops.dbg_set_powergate(dbg_s, args->mode); |
1281 | mutex_unlock(&g->dbg_sessions_lock); | 1281 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
1282 | return err; | 1282 | return err; |
1283 | } | 1283 | } |
1284 | 1284 | ||
@@ -1299,7 +1299,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s, | |||
1299 | } | 1299 | } |
1300 | 1300 | ||
1301 | /* Take the global lock, since we'll be doing global regops */ | 1301 | /* Take the global lock, since we'll be doing global regops */ |
1302 | mutex_lock(&g->dbg_sessions_lock); | 1302 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
1303 | 1303 | ||
1304 | ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s); | 1304 | ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s); |
1305 | if (!ch_gk20a) { | 1305 | if (!ch_gk20a) { |
@@ -1319,7 +1319,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s, | |||
1319 | 1319 | ||
1320 | err = g->ops.regops.apply_smpc_war(dbg_s); | 1320 | err = g->ops.regops.apply_smpc_war(dbg_s); |
1321 | clean_up: | 1321 | clean_up: |
1322 | mutex_unlock(&g->dbg_sessions_lock); | 1322 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
1323 | gk20a_idle(g->dev); | 1323 | gk20a_idle(g->dev); |
1324 | return err; | 1324 | return err; |
1325 | } | 1325 | } |
@@ -1341,7 +1341,7 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s, | |||
1341 | } | 1341 | } |
1342 | 1342 | ||
1343 | /* Take the global lock, since we'll be doing global regops */ | 1343 | /* Take the global lock, since we'll be doing global regops */ |
1344 | mutex_lock(&g->dbg_sessions_lock); | 1344 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
1345 | 1345 | ||
1346 | ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s); | 1346 | ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s); |
1347 | if (!ch_gk20a) { | 1347 | if (!ch_gk20a) { |
@@ -1361,7 +1361,7 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s, | |||
1361 | * added here with gk20a being deprecated | 1361 | * added here with gk20a being deprecated |
1362 | */ | 1362 | */ |
1363 | clean_up: | 1363 | clean_up: |
1364 | mutex_unlock(&g->dbg_sessions_lock); | 1364 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
1365 | gk20a_idle(g->dev); | 1365 | gk20a_idle(g->dev); |
1366 | return err; | 1366 | return err; |
1367 | } | 1367 | } |
@@ -1386,7 +1386,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm( | |||
1386 | return err; | 1386 | return err; |
1387 | } | 1387 | } |
1388 | 1388 | ||
1389 | mutex_lock(&g->dbg_sessions_lock); | 1389 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
1390 | 1390 | ||
1391 | /* Suspend GPU context switching */ | 1391 | /* Suspend GPU context switching */ |
1392 | err = gr_gk20a_disable_ctxsw(g); | 1392 | err = gr_gk20a_disable_ctxsw(g); |
@@ -1411,7 +1411,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm( | |||
1411 | gk20a_err(dev_from_gk20a(g), "unable to restart ctxsw!\n"); | 1411 | gk20a_err(dev_from_gk20a(g), "unable to restart ctxsw!\n"); |
1412 | 1412 | ||
1413 | clean_up: | 1413 | clean_up: |
1414 | mutex_unlock(&g->dbg_sessions_lock); | 1414 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
1415 | gk20a_idle(g->dev); | 1415 | gk20a_idle(g->dev); |
1416 | 1416 | ||
1417 | return err; | 1417 | return err; |
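
The hunks above are a mechanical rename: every mutex_lock()/mutex_unlock() pair becomes nvgpu_mutex_acquire()/nvgpu_mutex_release() on the same lock, and even helper names are rewritten textually, which is why gk20a_dbg_session_mutex_lock() turns into the oddly named gk20a_dbg_session_nvgpu_mutex_acquire(). A minimal sketch of the resulting pattern, assuming only the <nvgpu/lock.h> wrappers shown in these hunks; the struct and field names are hypothetical stand-ins:

    #include <nvgpu/lock.h>

    struct demo_session {                      /* hypothetical */
        struct nvgpu_mutex lock;
        int num_pending_events;
    };

    static void demo_consume_event(struct demo_session *s)
    {
        nvgpu_mutex_acquire(&s->lock);         /* was: mutex_lock() */
        if (s->num_pending_events > 0)
            s->num_pending_events--;
        nvgpu_mutex_release(&s->lock);         /* was: mutex_unlock() */
    }

The critical sections themselves are untouched; only the lock API changes.
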
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.h b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.h index 773a669c..caa9395b 100644 --- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.h | |||
@@ -67,14 +67,14 @@ struct dbg_session_gk20a { | |||
67 | 67 | ||
68 | /* list of bound channels, if any */ | 68 | /* list of bound channels, if any */ |
69 | struct list_head ch_list; | 69 | struct list_head ch_list; |
70 | struct mutex ch_list_lock; | 70 | struct nvgpu_mutex ch_list_lock; |
71 | 71 | ||
72 | /* event support */ | 72 | /* event support */ |
73 | struct dbg_gpu_session_events dbg_events; | 73 | struct dbg_gpu_session_events dbg_events; |
74 | 74 | ||
75 | bool broadcast_stop_trigger; | 75 | bool broadcast_stop_trigger; |
76 | 76 | ||
77 | struct mutex ioctl_lock; | 77 | struct nvgpu_mutex ioctl_lock; |
78 | }; | 78 | }; |
79 | 79 | ||
80 | struct dbg_session_data { | 80 | struct dbg_session_data { |
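
On the header side the change swaps the field types: struct mutex becomes struct nvgpu_mutex, so the session state no longer names Linux lock types directly. A hypothetical sketch of declaring and initializing such fields (struct and function names invented for illustration):

    #include <nvgpu/lock.h>

    struct demo_state {                        /* hypothetical */
        struct nvgpu_mutex ch_list_lock;       /* was: struct mutex */
        struct nvgpu_mutex ioctl_lock;
    };

    static void demo_state_init(struct demo_state *st)
    {
        /* wrapper initializers replace mutex_init() */
        nvgpu_mutex_init(&st->ch_list_lock);
        nvgpu_mutex_init(&st->ioctl_lock);
    }
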
diff --git a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c index d20229b3..4b8e61c4 100644 --- a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c | |||
@@ -421,7 +421,7 @@ void gk20a_debug_init(struct device *dev, const char *debugfs_symlink) | |||
421 | #endif | 421 | #endif |
422 | 422 | ||
423 | #ifdef CONFIG_DEBUG_FS | 423 | #ifdef CONFIG_DEBUG_FS |
424 | spin_lock_init(&g->debugfs_lock); | 424 | nvgpu_spinlock_init(&g->debugfs_lock); |
425 | 425 | ||
426 | g->mm.ltc_enabled = true; | 426 | g->mm.ltc_enabled = true; |
427 | g->mm.ltc_enabled_debug = true; | 427 | g->mm.ltc_enabled_debug = true; |
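
Spinlocks get the same treatment: spin_lock_init() becomes nvgpu_spinlock_init() on a struct nvgpu_spinlock (the field type change appears in the gk20a.h hunks further below). A one-line sketch with an invented lock name:

    #include <nvgpu/lock.h>

    struct nvgpu_spinlock demo_lock;           /* hypothetical; was: spinlock_t */

    static void demo_init(void)
    {
        nvgpu_spinlock_init(&demo_lock);       /* was: spin_lock_init() */
    }
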
diff --git a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c index 1f86fd8f..8244403e 100644 --- a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c | |||
@@ -60,8 +60,8 @@ struct gk20a_fecs_trace { | |||
60 | 60 | ||
61 | struct mem_desc trace_buf; | 61 | struct mem_desc trace_buf; |
62 | DECLARE_HASHTABLE(pid_hash_table, GK20A_FECS_TRACE_HASH_BITS); | 62 | DECLARE_HASHTABLE(pid_hash_table, GK20A_FECS_TRACE_HASH_BITS); |
63 | struct mutex hash_lock; | 63 | struct nvgpu_mutex hash_lock; |
64 | struct mutex poll_lock; | 64 | struct nvgpu_mutex poll_lock; |
65 | struct task_struct *poll_task; | 65 | struct task_struct *poll_task; |
66 | }; | 66 | }; |
67 | 67 | ||
@@ -133,14 +133,14 @@ void gk20a_fecs_trace_hash_dump(struct gk20a *g) | |||
133 | 133 | ||
134 | gk20a_dbg(gpu_dbg_ctxsw, "dumping hash table"); | 134 | gk20a_dbg(gpu_dbg_ctxsw, "dumping hash table"); |
135 | 135 | ||
136 | mutex_lock(&trace->hash_lock); | 136 | nvgpu_mutex_acquire(&trace->hash_lock); |
137 | hash_for_each(trace->pid_hash_table, bkt, ent, node) | 137 | hash_for_each(trace->pid_hash_table, bkt, ent, node) |
138 | { | 138 | { |
139 | gk20a_dbg(gpu_dbg_ctxsw, " ent=%p bkt=%x context_ptr=%x pid=%d", | 139 | gk20a_dbg(gpu_dbg_ctxsw, " ent=%p bkt=%x context_ptr=%x pid=%d", |
140 | ent, bkt, ent->context_ptr, ent->pid); | 140 | ent, bkt, ent->context_ptr, ent->pid); |
141 | 141 | ||
142 | } | 142 | } |
143 | mutex_unlock(&trace->hash_lock); | 143 | nvgpu_mutex_release(&trace->hash_lock); |
144 | } | 144 | } |
145 | 145 | ||
146 | static int gk20a_fecs_trace_hash_add(struct gk20a *g, u32 context_ptr, pid_t pid) | 146 | static int gk20a_fecs_trace_hash_add(struct gk20a *g, u32 context_ptr, pid_t pid) |
@@ -161,9 +161,9 @@ static int gk20a_fecs_trace_hash_add(struct gk20a *g, u32 context_ptr, pid_t pid | |||
161 | 161 | ||
162 | he->context_ptr = context_ptr; | 162 | he->context_ptr = context_ptr; |
163 | he->pid = pid; | 163 | he->pid = pid; |
164 | mutex_lock(&trace->hash_lock); | 164 | nvgpu_mutex_acquire(&trace->hash_lock); |
165 | hash_add(trace->pid_hash_table, &he->node, context_ptr); | 165 | hash_add(trace->pid_hash_table, &he->node, context_ptr); |
166 | mutex_unlock(&trace->hash_lock); | 166 | nvgpu_mutex_release(&trace->hash_lock); |
167 | return 0; | 167 | return 0; |
168 | } | 168 | } |
169 | 169 | ||
@@ -176,7 +176,7 @@ static void gk20a_fecs_trace_hash_del(struct gk20a *g, u32 context_ptr) | |||
176 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, | 176 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, |
177 | "freeing hash entry context_ptr=%x", context_ptr); | 177 | "freeing hash entry context_ptr=%x", context_ptr); |
178 | 178 | ||
179 | mutex_lock(&trace->hash_lock); | 179 | nvgpu_mutex_acquire(&trace->hash_lock); |
180 | hash_for_each_possible_safe(trace->pid_hash_table, ent, tmp, node, | 180 | hash_for_each_possible_safe(trace->pid_hash_table, ent, tmp, node, |
181 | context_ptr) { | 181 | context_ptr) { |
182 | if (ent->context_ptr == context_ptr) { | 182 | if (ent->context_ptr == context_ptr) { |
@@ -188,7 +188,7 @@ static void gk20a_fecs_trace_hash_del(struct gk20a *g, u32 context_ptr) | |||
188 | break; | 188 | break; |
189 | } | 189 | } |
190 | } | 190 | } |
191 | mutex_unlock(&trace->hash_lock); | 191 | nvgpu_mutex_release(&trace->hash_lock); |
192 | } | 192 | } |
193 | 193 | ||
194 | static void gk20a_fecs_trace_free_hash_table(struct gk20a *g) | 194 | static void gk20a_fecs_trace_free_hash_table(struct gk20a *g) |
@@ -200,12 +200,12 @@ static void gk20a_fecs_trace_free_hash_table(struct gk20a *g) | |||
200 | 200 | ||
201 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, "trace=%p", trace); | 201 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, "trace=%p", trace); |
202 | 202 | ||
203 | mutex_lock(&trace->hash_lock); | 203 | nvgpu_mutex_acquire(&trace->hash_lock); |
204 | hash_for_each_safe(trace->pid_hash_table, bkt, tmp, ent, node) { | 204 | hash_for_each_safe(trace->pid_hash_table, bkt, tmp, ent, node) { |
205 | hash_del(&ent->node); | 205 | hash_del(&ent->node); |
206 | kfree(ent); | 206 | kfree(ent); |
207 | } | 207 | } |
208 | mutex_unlock(&trace->hash_lock); | 208 | nvgpu_mutex_release(&trace->hash_lock); |
209 | 209 | ||
210 | } | 210 | } |
211 | 211 | ||
@@ -215,7 +215,7 @@ static pid_t gk20a_fecs_trace_find_pid(struct gk20a *g, u32 context_ptr) | |||
215 | struct gk20a_fecs_trace *trace = g->fecs_trace; | 215 | struct gk20a_fecs_trace *trace = g->fecs_trace; |
216 | pid_t pid = 0; | 216 | pid_t pid = 0; |
217 | 217 | ||
218 | mutex_lock(&trace->hash_lock); | 218 | nvgpu_mutex_acquire(&trace->hash_lock); |
219 | hash_for_each_possible(trace->pid_hash_table, ent, node, context_ptr) { | 219 | hash_for_each_possible(trace->pid_hash_table, ent, node, context_ptr) { |
220 | if (ent->context_ptr == context_ptr) { | 220 | if (ent->context_ptr == context_ptr) { |
221 | gk20a_dbg(gpu_dbg_ctxsw, | 221 | gk20a_dbg(gpu_dbg_ctxsw, |
@@ -225,7 +225,7 @@ static pid_t gk20a_fecs_trace_find_pid(struct gk20a *g, u32 context_ptr) | |||
225 | break; | 225 | break; |
226 | } | 226 | } |
227 | } | 227 | } |
228 | mutex_unlock(&trace->hash_lock); | 228 | nvgpu_mutex_release(&trace->hash_lock); |
229 | 229 | ||
230 | return pid; | 230 | return pid; |
231 | } | 231 | } |
@@ -336,7 +336,7 @@ static int gk20a_fecs_trace_poll(struct gk20a *g) | |||
336 | if (unlikely(err)) | 336 | if (unlikely(err)) |
337 | return err; | 337 | return err; |
338 | 338 | ||
339 | mutex_lock(&trace->poll_lock); | 339 | nvgpu_mutex_acquire(&trace->poll_lock); |
340 | write = gk20a_fecs_trace_get_write_index(g); | 340 | write = gk20a_fecs_trace_get_write_index(g); |
341 | if (unlikely((write < 0) || (write >= GK20A_FECS_TRACE_NUM_RECORDS))) { | 341 | if (unlikely((write < 0) || (write >= GK20A_FECS_TRACE_NUM_RECORDS))) { |
342 | gk20a_err(dev_from_gk20a(g), | 342 | gk20a_err(dev_from_gk20a(g), |
@@ -371,7 +371,7 @@ static int gk20a_fecs_trace_poll(struct gk20a *g) | |||
371 | gk20a_fecs_trace_set_read_index(g, read); | 371 | gk20a_fecs_trace_set_read_index(g, read); |
372 | 372 | ||
373 | done: | 373 | done: |
374 | mutex_unlock(&trace->poll_lock); | 374 | nvgpu_mutex_release(&trace->poll_lock); |
375 | gk20a_idle(g->dev); | 375 | gk20a_idle(g->dev); |
376 | return err; | 376 | return err; |
377 | } | 377 | } |
@@ -580,8 +580,8 @@ static int gk20a_fecs_trace_init(struct gk20a *g) | |||
580 | goto clean; | 580 | goto clean; |
581 | } | 581 | } |
582 | 582 | ||
583 | mutex_init(&trace->poll_lock); | 583 | nvgpu_mutex_init(&trace->poll_lock); |
584 | mutex_init(&trace->hash_lock); | 584 | nvgpu_mutex_init(&trace->hash_lock); |
585 | hash_init(trace->pid_hash_table); | 585 | hash_init(trace->pid_hash_table); |
586 | 586 | ||
587 | gk20a_fecs_trace_debugfs_init(g); | 587 | gk20a_fecs_trace_debugfs_init(g); |
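
In the FECS trace code the converted mutexes guard a kernel hashtable; every insert, lookup, and teardown walk above holds hash_lock across the table operation. A condensed sketch of that pairing, with hypothetical demo_* names standing in for the trace bookkeeping:

    #include <linux/hashtable.h>
    #include <linux/types.h>
    #include <nvgpu/lock.h>

    struct demo_trace {                        /* hypothetical */
        DECLARE_HASHTABLE(pid_hash_table, 4);
        struct nvgpu_mutex hash_lock;
    };

    struct demo_entry {
        struct hlist_node node;
        u32 context_ptr;
    };

    static void demo_add(struct demo_trace *t, struct demo_entry *e)
    {
        nvgpu_mutex_acquire(&t->hash_lock);    /* was: mutex_lock() */
        hash_add(t->pid_hash_table, &e->node, e->context_ptr);
        nvgpu_mutex_release(&t->hash_lock);
    }
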
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c index 4a32194c..c245f4a2 100644 --- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | |||
@@ -477,7 +477,7 @@ void gk20a_fifo_delete_runlist(struct fifo_gk20a *f) | |||
477 | kfree(runlist->active_tsgs); | 477 | kfree(runlist->active_tsgs); |
478 | runlist->active_tsgs = NULL; | 478 | runlist->active_tsgs = NULL; |
479 | 479 | ||
480 | mutex_destroy(&runlist->mutex); | 480 | nvgpu_mutex_destroy(&runlist->mutex); |
481 | 481 | ||
482 | } | 482 | } |
483 | memset(f->runlist_info, 0, (sizeof(struct fifo_runlist_info_gk20a) * | 483 | memset(f->runlist_info, 0, (sizeof(struct fifo_runlist_info_gk20a) * |
@@ -650,7 +650,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f) | |||
650 | goto clean_up_runlist; | 650 | goto clean_up_runlist; |
651 | } | 651 | } |
652 | } | 652 | } |
653 | mutex_init(&runlist->mutex); | 653 | nvgpu_mutex_init(&runlist->mutex); |
654 | 654 | ||
655 | /* None of the buffers is pinned if this value doesn't change. | 655 | /* None of the buffers is pinned if this value doesn't change. |
656 | Otherwise, one of them (cur_buffer) must have been pinned. */ | 656 | Otherwise, one of them (cur_buffer) must have been pinned. */ |
@@ -809,8 +809,8 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g) | |||
809 | 809 | ||
810 | f->g = g; | 810 | f->g = g; |
811 | 811 | ||
812 | mutex_init(&f->intr.isr.mutex); | 812 | nvgpu_mutex_init(&f->intr.isr.mutex); |
813 | mutex_init(&f->gr_reset_mutex); | 813 | nvgpu_mutex_init(&f->gr_reset_mutex); |
814 | gk20a_init_fifo_pbdma_intr_descs(f); /* just filling in data/tables */ | 814 | gk20a_init_fifo_pbdma_intr_descs(f); /* just filling in data/tables */ |
815 | 815 | ||
816 | f->num_channels = g->ops.fifo.get_num_fifos(g); | 816 | f->num_channels = g->ops.fifo.get_num_fifos(g); |
@@ -846,7 +846,7 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g) | |||
846 | init_runlist(g, f); | 846 | init_runlist(g, f); |
847 | 847 | ||
848 | INIT_LIST_HEAD(&f->free_chs); | 848 | INIT_LIST_HEAD(&f->free_chs); |
849 | mutex_init(&f->free_chs_mutex); | 849 | nvgpu_mutex_init(&f->free_chs_mutex); |
850 | 850 | ||
851 | if (g->ops.mm.is_bar1_supported(g)) | 851 | if (g->ops.mm.is_bar1_supported(g)) |
852 | err = gk20a_gmmu_alloc_map_sys(&g->mm.bar1.vm, | 852 | err = gk20a_gmmu_alloc_map_sys(&g->mm.bar1.vm, |
@@ -871,12 +871,12 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g) | |||
871 | gk20a_init_channel_support(g, chid); | 871 | gk20a_init_channel_support(g, chid); |
872 | gk20a_init_tsg_support(g, chid); | 872 | gk20a_init_tsg_support(g, chid); |
873 | } | 873 | } |
874 | mutex_init(&f->tsg_inuse_mutex); | 874 | nvgpu_mutex_init(&f->tsg_inuse_mutex); |
875 | 875 | ||
876 | f->remove_support = gk20a_remove_fifo_support; | 876 | f->remove_support = gk20a_remove_fifo_support; |
877 | 877 | ||
878 | f->deferred_reset_pending = false; | 878 | f->deferred_reset_pending = false; |
879 | mutex_init(&f->deferred_reset_mutex); | 879 | nvgpu_mutex_init(&f->deferred_reset_mutex); |
880 | 880 | ||
881 | f->sw_ready = true; | 881 | f->sw_ready = true; |
882 | 882 | ||
@@ -1224,7 +1224,7 @@ static bool gk20a_fifo_set_ctx_mmu_error(struct gk20a *g, | |||
1224 | if (!ch) | 1224 | if (!ch) |
1225 | return verbose; | 1225 | return verbose; |
1226 | 1226 | ||
1227 | mutex_lock(&ch->error_notifier_mutex); | 1227 | nvgpu_mutex_acquire(&ch->error_notifier_mutex); |
1228 | if (ch->error_notifier_ref) { | 1228 | if (ch->error_notifier_ref) { |
1229 | u32 err = ch->error_notifier->info32; | 1229 | u32 err = ch->error_notifier->info32; |
1230 | if (ch->error_notifier->status == 0xffff) { | 1230 | if (ch->error_notifier->status == 0xffff) { |
@@ -1240,7 +1240,7 @@ static bool gk20a_fifo_set_ctx_mmu_error(struct gk20a *g, | |||
1240 | NVGPU_CHANNEL_FIFO_ERROR_MMU_ERR_FLT); | 1240 | NVGPU_CHANNEL_FIFO_ERROR_MMU_ERR_FLT); |
1241 | } | 1241 | } |
1242 | } | 1242 | } |
1243 | mutex_unlock(&ch->error_notifier_mutex); | 1243 | nvgpu_mutex_release(&ch->error_notifier_mutex); |
1244 | 1244 | ||
1245 | /* mark channel as faulted */ | 1245 | /* mark channel as faulted */ |
1246 | ch->has_timedout = true; | 1246 | ch->has_timedout = true; |
@@ -1309,7 +1309,7 @@ int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch) | |||
1309 | { | 1309 | { |
1310 | u32 engine_id, engines; | 1310 | u32 engine_id, engines; |
1311 | 1311 | ||
1312 | mutex_lock(&g->dbg_sessions_lock); | 1312 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
1313 | gr_gk20a_disable_ctxsw(g); | 1313 | gr_gk20a_disable_ctxsw(g); |
1314 | 1314 | ||
1315 | if (!g->fifo.deferred_reset_pending) | 1315 | if (!g->fifo.deferred_reset_pending) |
@@ -1336,7 +1336,7 @@ int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch) | |||
1336 | 1336 | ||
1337 | clean_up: | 1337 | clean_up: |
1338 | gr_gk20a_enable_ctxsw(g); | 1338 | gr_gk20a_enable_ctxsw(g); |
1339 | mutex_unlock(&g->dbg_sessions_lock); | 1339 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
1340 | 1340 | ||
1341 | return 0; | 1341 | return 0; |
1342 | } | 1342 | } |
@@ -1487,9 +1487,9 @@ static bool gk20a_fifo_handle_mmu_fault( | |||
1487 | } else if (engine_id != FIFO_INVAL_ENGINE_ID) { | 1487 | } else if (engine_id != FIFO_INVAL_ENGINE_ID) { |
1488 | /* if lock is already taken, a reset is taking place | 1488 | /* if lock is already taken, a reset is taking place |
1489 | so no need to repeat */ | 1489 | so no need to repeat */ |
1490 | if (mutex_trylock(&g->fifo.gr_reset_mutex)) { | 1490 | if (nvgpu_mutex_tryacquire(&g->fifo.gr_reset_mutex)) { |
1491 | gk20a_fifo_reset_engine(g, engine_id); | 1491 | gk20a_fifo_reset_engine(g, engine_id); |
1492 | mutex_unlock(&g->fifo.gr_reset_mutex); | 1492 | nvgpu_mutex_release(&g->fifo.gr_reset_mutex); |
1493 | } | 1493 | } |
1494 | } | 1494 | } |
1495 | 1495 | ||
@@ -1646,7 +1646,7 @@ void gk20a_fifo_recover_ch(struct gk20a *g, u32 hw_chid, bool verbose) | |||
1646 | 1646 | ||
1647 | /* stop context switching to prevent engine assignments from | 1647 | /* stop context switching to prevent engine assignments from |
1648 | changing until channel is recovered */ | 1648 | changing until channel is recovered */ |
1649 | mutex_lock(&g->dbg_sessions_lock); | 1649 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
1650 | gr_gk20a_disable_ctxsw(g); | 1650 | gr_gk20a_disable_ctxsw(g); |
1651 | 1651 | ||
1652 | engines = gk20a_fifo_engines_on_id(g, hw_chid, false); | 1652 | engines = gk20a_fifo_engines_on_id(g, hw_chid, false); |
@@ -1667,7 +1667,7 @@ void gk20a_fifo_recover_ch(struct gk20a *g, u32 hw_chid, bool verbose) | |||
1667 | } | 1667 | } |
1668 | 1668 | ||
1669 | gr_gk20a_enable_ctxsw(g); | 1669 | gr_gk20a_enable_ctxsw(g); |
1670 | mutex_unlock(&g->dbg_sessions_lock); | 1670 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
1671 | } | 1671 | } |
1672 | 1672 | ||
1673 | void gk20a_fifo_recover_tsg(struct gk20a *g, u32 tsgid, bool verbose) | 1673 | void gk20a_fifo_recover_tsg(struct gk20a *g, u32 tsgid, bool verbose) |
@@ -1676,7 +1676,7 @@ void gk20a_fifo_recover_tsg(struct gk20a *g, u32 tsgid, bool verbose) | |||
1676 | 1676 | ||
1677 | /* stop context switching to prevent engine assignments from | 1677 | /* stop context switching to prevent engine assignments from |
1678 | changing until TSG is recovered */ | 1678 | changing until TSG is recovered */ |
1679 | mutex_lock(&g->dbg_sessions_lock); | 1679 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
1680 | gr_gk20a_disable_ctxsw(g); | 1680 | gr_gk20a_disable_ctxsw(g); |
1681 | 1681 | ||
1682 | engines = gk20a_fifo_engines_on_id(g, tsgid, true); | 1682 | engines = gk20a_fifo_engines_on_id(g, tsgid, true); |
@@ -1693,7 +1693,7 @@ void gk20a_fifo_recover_tsg(struct gk20a *g, u32 tsgid, bool verbose) | |||
1693 | } | 1693 | } |
1694 | 1694 | ||
1695 | gr_gk20a_enable_ctxsw(g); | 1695 | gr_gk20a_enable_ctxsw(g); |
1696 | mutex_unlock(&g->dbg_sessions_lock); | 1696 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
1697 | } | 1697 | } |
1698 | 1698 | ||
1699 | void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids, | 1699 | void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids, |
@@ -2307,7 +2307,7 @@ void gk20a_fifo_isr(struct gk20a *g) | |||
2307 | if (g->fifo.sw_ready) { | 2307 | if (g->fifo.sw_ready) { |
2308 | /* note we're not actually in an "isr", but rather | 2308 | /* note we're not actually in an "isr", but rather |
2309 | * in a threaded interrupt context... */ | 2309 | * in a threaded interrupt context... */ |
2310 | mutex_lock(&g->fifo.intr.isr.mutex); | 2310 | nvgpu_mutex_acquire(&g->fifo.intr.isr.mutex); |
2311 | 2311 | ||
2312 | gk20a_dbg(gpu_dbg_intr, "fifo isr %08x\n", fifo_intr); | 2312 | gk20a_dbg(gpu_dbg_intr, "fifo isr %08x\n", fifo_intr); |
2313 | 2313 | ||
@@ -2322,7 +2322,7 @@ void gk20a_fifo_isr(struct gk20a *g) | |||
2322 | if (unlikely(fifo_intr & error_intr_mask)) | 2322 | if (unlikely(fifo_intr & error_intr_mask)) |
2323 | clear_intr = fifo_error_isr(g, fifo_intr); | 2323 | clear_intr = fifo_error_isr(g, fifo_intr); |
2324 | 2324 | ||
2325 | mutex_unlock(&g->fifo.intr.isr.mutex); | 2325 | nvgpu_mutex_release(&g->fifo.intr.isr.mutex); |
2326 | } | 2326 | } |
2327 | gk20a_writel(g, fifo_intr_0_r(), clear_intr); | 2327 | gk20a_writel(g, fifo_intr_0_r(), clear_intr); |
2328 | 2328 | ||
@@ -2434,7 +2434,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid) | |||
2434 | 2434 | ||
2435 | /* we have no idea which runlist we are using. lock all */ | 2435 | /* we have no idea which runlist we are using. lock all */ |
2436 | for (i = 0; i < g->fifo.max_runlists; i++) | 2436 | for (i = 0; i < g->fifo.max_runlists; i++) |
2437 | mutex_lock(&f->runlist_info[i].mutex); | 2437 | nvgpu_mutex_acquire(&f->runlist_info[i].mutex); |
2438 | 2438 | ||
2439 | mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); | 2439 | mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); |
2440 | 2440 | ||
@@ -2444,7 +2444,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid) | |||
2444 | pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); | 2444 | pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); |
2445 | 2445 | ||
2446 | for (i = 0; i < g->fifo.max_runlists; i++) | 2446 | for (i = 0; i < g->fifo.max_runlists; i++) |
2447 | mutex_unlock(&f->runlist_info[i].mutex); | 2447 | nvgpu_mutex_release(&f->runlist_info[i].mutex); |
2448 | 2448 | ||
2449 | return ret; | 2449 | return ret; |
2450 | } | 2450 | } |
@@ -2461,7 +2461,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid) | |||
2461 | 2461 | ||
2462 | /* we have no idea which runlist we are using. lock all */ | 2462 | /* we have no idea which runlist we are using. lock all */ |
2463 | for (i = 0; i < g->fifo.max_runlists; i++) | 2463 | for (i = 0; i < g->fifo.max_runlists; i++) |
2464 | mutex_lock(&f->runlist_info[i].mutex); | 2464 | nvgpu_mutex_acquire(&f->runlist_info[i].mutex); |
2465 | 2465 | ||
2466 | mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); | 2466 | mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); |
2467 | 2467 | ||
@@ -2471,7 +2471,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid) | |||
2471 | pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); | 2471 | pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); |
2472 | 2472 | ||
2473 | for (i = 0; i < g->fifo.max_runlists; i++) | 2473 | for (i = 0; i < g->fifo.max_runlists; i++) |
2474 | mutex_unlock(&f->runlist_info[i].mutex); | 2474 | nvgpu_mutex_release(&f->runlist_info[i].mutex); |
2475 | 2475 | ||
2476 | return ret; | 2476 | return ret; |
2477 | } | 2477 | } |
@@ -3046,7 +3046,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 hw_chid, | |||
3046 | 3046 | ||
3047 | runlist = &f->runlist_info[runlist_id]; | 3047 | runlist = &f->runlist_info[runlist_id]; |
3048 | 3048 | ||
3049 | mutex_lock(&runlist->mutex); | 3049 | nvgpu_mutex_acquire(&runlist->mutex); |
3050 | 3050 | ||
3051 | mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); | 3051 | mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); |
3052 | 3052 | ||
@@ -3056,7 +3056,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 hw_chid, | |||
3056 | if (!mutex_ret) | 3056 | if (!mutex_ret) |
3057 | pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); | 3057 | pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); |
3058 | 3058 | ||
3059 | mutex_unlock(&runlist->mutex); | 3059 | nvgpu_mutex_release(&runlist->mutex); |
3060 | return ret; | 3060 | return ret; |
3061 | } | 3061 | } |
3062 | 3062 | ||
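
Two details are worth noting in the FIFO conversion. First, mutex_trylock() maps to nvgpu_mutex_tryacquire(), which the MMU-fault path uses so a second caller skips a reset that is already in flight instead of blocking behind it. Second, the preempt paths acquire every runlist mutex in ascending index order and release them in the same loop order, keeping the lock ordering consistent. A sketch of the trylock idiom (the guard body is hypothetical):

    #include <nvgpu/lock.h>

    static void demo_maybe_reset(struct nvgpu_mutex *gr_reset_mutex)
    {
        /* non-blocking: if the lock is held, a reset is already running */
        if (nvgpu_mutex_tryacquire(gr_reset_mutex)) {
            /* ... reset the engine ... */
            nvgpu_mutex_release(gr_reset_mutex);
        }
    }
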
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h index 20baf9de..1a248dba 100644 --- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h | |||
@@ -41,7 +41,7 @@ struct fifo_runlist_info_gk20a { | |||
41 | u32 total_entries; | 41 | u32 total_entries; |
42 | bool stopped; | 42 | bool stopped; |
43 | bool support_tsg; | 43 | bool support_tsg; |
44 | struct mutex mutex; /* protect channel preempt and runlist upate */ | 44 | struct nvgpu_mutex mutex; /* protect channel preempt and runlist update */ |
45 | }; | 45 | }; |
46 | 46 | ||
47 | enum { | 47 | enum { |
@@ -120,18 +120,18 @@ struct fifo_gk20a { | |||
120 | struct channel_gk20a *channel; | 120 | struct channel_gk20a *channel; |
121 | /* zero-kref'd channels here */ | 121 | /* zero-kref'd channels here */ |
122 | struct list_head free_chs; | 122 | struct list_head free_chs; |
123 | struct mutex free_chs_mutex; | 123 | struct nvgpu_mutex free_chs_mutex; |
124 | struct mutex gr_reset_mutex; | 124 | struct nvgpu_mutex gr_reset_mutex; |
125 | 125 | ||
126 | struct tsg_gk20a *tsg; | 126 | struct tsg_gk20a *tsg; |
127 | struct mutex tsg_inuse_mutex; | 127 | struct nvgpu_mutex tsg_inuse_mutex; |
128 | 128 | ||
129 | void (*remove_support)(struct fifo_gk20a *); | 129 | void (*remove_support)(struct fifo_gk20a *); |
130 | bool sw_ready; | 130 | bool sw_ready; |
131 | struct { | 131 | struct { |
132 | /* share info between isrs and non-isr code */ | 132 | /* share info between isrs and non-isr code */ |
133 | struct { | 133 | struct { |
134 | struct mutex mutex; | 134 | struct nvgpu_mutex mutex; |
135 | } isr; | 135 | } isr; |
136 | struct { | 136 | struct { |
137 | u32 device_fatal_0; | 137 | u32 device_fatal_0; |
@@ -147,7 +147,7 @@ struct fifo_gk20a { | |||
147 | 147 | ||
148 | unsigned long deferred_fault_engines; | 148 | unsigned long deferred_fault_engines; |
149 | bool deferred_reset_pending; | 149 | bool deferred_reset_pending; |
150 | struct mutex deferred_reset_mutex; | 150 | struct nvgpu_mutex deferred_reset_mutex; |
151 | }; | 151 | }; |
152 | 152 | ||
153 | static inline const char *gk20a_fifo_interleave_level_name(u32 interleave_level) | 153 | static inline const char *gk20a_fifo_interleave_level_name(u32 interleave_level) |
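
The header mirrors the init/destroy pairing visible in fifo_gk20a.c: each struct nvgpu_mutex set up with nvgpu_mutex_init() in gk20a_init_fifo_setup_sw() is torn down with nvgpu_mutex_destroy() in gk20a_fifo_delete_runlist(). Roughly, with invented demo names:

    #include <nvgpu/lock.h>

    struct demo_runlist {                      /* hypothetical */
        struct nvgpu_mutex mutex;
    };

    static void demo_runlist_setup(struct demo_runlist *r)
    {
        nvgpu_mutex_init(&r->mutex);           /* setup_sw side */
    }

    static void demo_runlist_teardown(struct demo_runlist *r)
    {
        nvgpu_mutex_destroy(&r->mutex);        /* delete_runlist side */
    }
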
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c index 79c3fd09..32570d3d 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gk20a.c | |||
@@ -33,7 +33,7 @@ | |||
33 | #include <linux/thermal.h> | 33 | #include <linux/thermal.h> |
34 | #include <asm/cacheflush.h> | 34 | #include <asm/cacheflush.h> |
35 | #include <linux/debugfs.h> | 35 | #include <linux/debugfs.h> |
36 | #include <linux/spinlock.h> | 36 | #include <nvgpu/lock.h> |
37 | #include <linux/clk/tegra.h> | 37 | #include <linux/clk/tegra.h> |
38 | #include <linux/kthread.h> | 38 | #include <linux/kthread.h> |
39 | #include <linux/platform/tegra/common.h> | 39 | #include <linux/platform/tegra/common.h> |
@@ -795,13 +795,13 @@ static int gk20a_pm_prepare_poweroff(struct device *dev) | |||
795 | 795 | ||
796 | gk20a_dbg_fn(""); | 796 | gk20a_dbg_fn(""); |
797 | 797 | ||
798 | mutex_lock(&g->poweroff_lock); | 798 | nvgpu_mutex_acquire(&g->poweroff_lock); |
799 | 799 | ||
800 | if (!g->power_on) | 800 | if (!g->power_on) |
801 | goto done; | 801 | goto done; |
802 | 802 | ||
803 | if (gk20a_fifo_is_engine_busy(g)) { | 803 | if (gk20a_fifo_is_engine_busy(g)) { |
804 | mutex_unlock(&g->poweroff_lock); | 804 | nvgpu_mutex_release(&g->poweroff_lock); |
805 | return -EBUSY; | 805 | return -EBUSY; |
806 | } | 806 | } |
807 | gk20a_scale_suspend(dev); | 807 | gk20a_scale_suspend(dev); |
@@ -844,7 +844,7 @@ static int gk20a_pm_prepare_poweroff(struct device *dev) | |||
844 | gk20a_lockout_registers(g); | 844 | gk20a_lockout_registers(g); |
845 | 845 | ||
846 | done: | 846 | done: |
847 | mutex_unlock(&g->poweroff_lock); | 847 | nvgpu_mutex_release(&g->poweroff_lock); |
848 | 848 | ||
849 | return ret; | 849 | return ret; |
850 | } | 850 | } |
@@ -1373,9 +1373,9 @@ static int gk20a_pm_unrailgate(struct device *dev) | |||
1373 | trace_gk20a_pm_unrailgate(dev_name(dev)); | 1373 | trace_gk20a_pm_unrailgate(dev_name(dev)); |
1374 | 1374 | ||
1375 | if (platform->unrailgate) { | 1375 | if (platform->unrailgate) { |
1376 | mutex_lock(&platform->railgate_lock); | 1376 | nvgpu_mutex_acquire(&platform->railgate_lock); |
1377 | ret = platform->unrailgate(dev); | 1377 | ret = platform->unrailgate(dev); |
1378 | mutex_unlock(&platform->railgate_lock); | 1378 | nvgpu_mutex_release(&platform->railgate_lock); |
1379 | } | 1379 | } |
1380 | 1380 | ||
1381 | #ifdef CONFIG_DEBUG_FS | 1381 | #ifdef CONFIG_DEBUG_FS |
@@ -1896,11 +1896,11 @@ void gk20a_disable(struct gk20a *g, u32 units) | |||
1896 | 1896 | ||
1897 | gk20a_dbg(gpu_dbg_info, "pmc disable: %08x\n", units); | 1897 | gk20a_dbg(gpu_dbg_info, "pmc disable: %08x\n", units); |
1898 | 1898 | ||
1899 | spin_lock(&g->mc_enable_lock); | 1899 | nvgpu_spinlock_acquire(&g->mc_enable_lock); |
1900 | pmc = gk20a_readl(g, mc_enable_r()); | 1900 | pmc = gk20a_readl(g, mc_enable_r()); |
1901 | pmc &= ~units; | 1901 | pmc &= ~units; |
1902 | gk20a_writel(g, mc_enable_r(), pmc); | 1902 | gk20a_writel(g, mc_enable_r(), pmc); |
1903 | spin_unlock(&g->mc_enable_lock); | 1903 | nvgpu_spinlock_release(&g->mc_enable_lock); |
1904 | } | 1904 | } |
1905 | 1905 | ||
1906 | void gk20a_enable(struct gk20a *g, u32 units) | 1906 | void gk20a_enable(struct gk20a *g, u32 units) |
@@ -1909,12 +1909,12 @@ void gk20a_enable(struct gk20a *g, u32 units) | |||
1909 | 1909 | ||
1910 | gk20a_dbg(gpu_dbg_info, "pmc enable: %08x\n", units); | 1910 | gk20a_dbg(gpu_dbg_info, "pmc enable: %08x\n", units); |
1911 | 1911 | ||
1912 | spin_lock(&g->mc_enable_lock); | 1912 | nvgpu_spinlock_acquire(&g->mc_enable_lock); |
1913 | pmc = gk20a_readl(g, mc_enable_r()); | 1913 | pmc = gk20a_readl(g, mc_enable_r()); |
1914 | pmc |= units; | 1914 | pmc |= units; |
1915 | gk20a_writel(g, mc_enable_r(), pmc); | 1915 | gk20a_writel(g, mc_enable_r(), pmc); |
1916 | gk20a_readl(g, mc_enable_r()); | 1916 | gk20a_readl(g, mc_enable_r()); |
1917 | spin_unlock(&g->mc_enable_lock); | 1917 | nvgpu_spinlock_release(&g->mc_enable_lock); |
1918 | 1918 | ||
1919 | udelay(20); | 1919 | udelay(20); |
1920 | } | 1920 | } |
@@ -1953,7 +1953,7 @@ int __gk20a_do_idle(struct device *dev, bool force_reset) | |||
1953 | down_write(&g->busy_lock); | 1953 | down_write(&g->busy_lock); |
1954 | 1954 | ||
1955 | /* acquire railgate lock to prevent unrailgate in midst of do_idle() */ | 1955 | /* acquire railgate lock to prevent unrailgate in midst of do_idle() */ |
1956 | mutex_lock(&platform->railgate_lock); | 1956 | nvgpu_mutex_acquire(&platform->railgate_lock); |
1957 | 1957 | ||
1958 | /* check if it is already railgated ? */ | 1958 | /* check if it is already railgated ? */ |
1959 | if (platform->is_railgated(dev)) | 1959 | if (platform->is_railgated(dev)) |
@@ -1963,7 +1963,7 @@ int __gk20a_do_idle(struct device *dev, bool force_reset) | |||
1963 | * release railgate_lock, prevent suspend by incrementing usage counter, | 1963 | * release railgate_lock, prevent suspend by incrementing usage counter, |
1964 | * re-acquire railgate_lock | 1964 | * re-acquire railgate_lock |
1965 | */ | 1965 | */ |
1966 | mutex_unlock(&platform->railgate_lock); | 1966 | nvgpu_mutex_release(&platform->railgate_lock); |
1967 | pm_runtime_get_sync(dev); | 1967 | pm_runtime_get_sync(dev); |
1968 | 1968 | ||
1969 | /* | 1969 | /* |
@@ -1975,7 +1975,7 @@ int __gk20a_do_idle(struct device *dev, bool force_reset) | |||
1975 | target_ref_cnt = 2; | 1975 | target_ref_cnt = 2; |
1976 | else | 1976 | else |
1977 | target_ref_cnt = 1; | 1977 | target_ref_cnt = 1; |
1978 | mutex_lock(&platform->railgate_lock); | 1978 | nvgpu_mutex_acquire(&platform->railgate_lock); |
1979 | 1979 | ||
1980 | nvgpu_timeout_init(g, &timeout, GK20A_WAIT_FOR_IDLE_MS, | 1980 | nvgpu_timeout_init(g, &timeout, GK20A_WAIT_FOR_IDLE_MS, |
1981 | NVGPU_TIMER_CPU_TIMER); | 1981 | NVGPU_TIMER_CPU_TIMER); |
@@ -2052,7 +2052,7 @@ int __gk20a_do_idle(struct device *dev, bool force_reset) | |||
2052 | fail_drop_usage_count: | 2052 | fail_drop_usage_count: |
2053 | pm_runtime_put_noidle(dev); | 2053 | pm_runtime_put_noidle(dev); |
2054 | fail_timeout: | 2054 | fail_timeout: |
2055 | mutex_unlock(&platform->railgate_lock); | 2055 | nvgpu_mutex_release(&platform->railgate_lock); |
2056 | up_write(&g->busy_lock); | 2056 | up_write(&g->busy_lock); |
2057 | return -EBUSY; | 2057 | return -EBUSY; |
2058 | } | 2058 | } |
@@ -2101,7 +2101,7 @@ int __gk20a_do_unidle(struct device *dev) | |||
2101 | } | 2101 | } |
2102 | 2102 | ||
2103 | /* release the lock and open up all other busy() calls */ | 2103 | /* release the lock and open up all other busy() calls */ |
2104 | mutex_unlock(&platform->railgate_lock); | 2104 | nvgpu_mutex_release(&platform->railgate_lock); |
2105 | up_write(&g->busy_lock); | 2105 | up_write(&g->busy_lock); |
2106 | 2106 | ||
2107 | return 0; | 2107 | return 0; |
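
gk20a.c also converts its spinlock users: the read-modify-write of mc_enable_r() in gk20a_enable()/gk20a_disable() now sits between nvgpu_spinlock_acquire() and nvgpu_spinlock_release(). A self-contained sketch of that shape, with a plain memory word standing in for the gk20a_readl()/gk20a_writel() accessors:

    #include <linux/types.h>
    #include <nvgpu/lock.h>

    /* hypothetical mirror of gk20a_enable(): set bits under the lock */
    static u32 demo_set_bits(struct nvgpu_spinlock *lock, u32 *reg, u32 units)
    {
        u32 pmc;

        nvgpu_spinlock_acquire(lock);          /* was: spin_lock() */
        pmc = *reg;
        pmc |= units;
        *reg = pmc;
        nvgpu_spinlock_release(lock);          /* was: spin_unlock() */
        return pmc;
    }
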
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h index 31b02378..acc3b975 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/gk20a.h | |||
@@ -29,7 +29,7 @@ struct gk20a_ctxsw_trace; | |||
29 | struct acr_desc; | 29 | struct acr_desc; |
30 | 30 | ||
31 | #include <linux/sched.h> | 31 | #include <linux/sched.h> |
32 | #include <linux/spinlock.h> | 32 | #include <nvgpu/lock.h> |
33 | #include <linux/nvgpu.h> | 33 | #include <linux/nvgpu.h> |
34 | #include <linux/irqreturn.h> | 34 | #include <linux/irqreturn.h> |
35 | #include <soc/tegra/chip-id.h> | 35 | #include <soc/tegra/chip-id.h> |
@@ -871,9 +871,9 @@ struct gk20a { | |||
871 | bool timeouts_enabled; | 871 | bool timeouts_enabled; |
872 | #endif | 872 | #endif |
873 | 873 | ||
874 | struct mutex ch_wdt_lock; | 874 | struct nvgpu_mutex ch_wdt_lock; |
875 | 875 | ||
876 | struct mutex poweroff_lock; | 876 | struct nvgpu_mutex poweroff_lock; |
877 | 877 | ||
878 | /* Channel priorities */ | 878 | /* Channel priorities */ |
879 | u32 timeslice_low_priority_us; | 879 | u32 timeslice_low_priority_us; |
@@ -900,7 +900,7 @@ struct gk20a { | |||
900 | u32 emc3d_ratio; | 900 | u32 emc3d_ratio; |
901 | 901 | ||
902 | #ifdef CONFIG_DEBUG_FS | 902 | #ifdef CONFIG_DEBUG_FS |
903 | spinlock_t debugfs_lock; | 903 | struct nvgpu_spinlock debugfs_lock; |
904 | struct dentry *debugfs_ltc_enabled; | 904 | struct dentry *debugfs_ltc_enabled; |
905 | struct dentry *debugfs_timeouts_enabled; | 905 | struct dentry *debugfs_timeouts_enabled; |
906 | struct dentry *debugfs_gr_idle_timeout_default; | 906 | struct dentry *debugfs_gr_idle_timeout_default; |
@@ -924,11 +924,11 @@ struct gk20a { | |||
924 | 924 | ||
925 | /* List of pending SW semaphore waits. */ | 925 | /* List of pending SW semaphore waits. */ |
926 | struct list_head pending_sema_waits; | 926 | struct list_head pending_sema_waits; |
927 | raw_spinlock_t pending_sema_waits_lock; | 927 | struct nvgpu_raw_spinlock pending_sema_waits_lock; |
928 | 928 | ||
929 | /* held while manipulating # of debug/profiler sessions present */ | 929 | /* held while manipulating # of debug/profiler sessions present */ |
930 | /* also prevents debug sessions from attaching until released */ | 930 | /* also prevents debug sessions from attaching until released */ |
931 | struct mutex dbg_sessions_lock; | 931 | struct nvgpu_mutex dbg_sessions_lock; |
932 | int dbg_powergating_disabled_refcount; /*refcount for pg disable */ | 932 | int dbg_powergating_disabled_refcount; /*refcount for pg disable */ |
933 | int dbg_timeout_disabled_refcount; /*refcount for timeout disable */ | 933 | int dbg_timeout_disabled_refcount; /*refcount for timeout disable */ |
934 | 934 | ||
@@ -942,7 +942,7 @@ struct gk20a { | |||
942 | u64 pg_ungating_time_us; | 942 | u64 pg_ungating_time_us; |
943 | u32 pg_gating_cnt; | 943 | u32 pg_gating_cnt; |
944 | 944 | ||
945 | spinlock_t mc_enable_lock; | 945 | struct nvgpu_spinlock mc_enable_lock; |
946 | 946 | ||
947 | struct nvgpu_gpu_characteristics gpu_characteristics; | 947 | struct nvgpu_gpu_characteristics gpu_characteristics; |
948 | 948 | ||
@@ -983,7 +983,7 @@ struct gk20a { | |||
983 | struct device *node; | 983 | struct device *node; |
984 | } sched; | 984 | } sched; |
985 | 985 | ||
986 | struct mutex client_lock; | 986 | struct nvgpu_mutex client_lock; |
987 | int client_refcount; /* open channels and ctrl nodes */ | 987 | int client_refcount; /* open channels and ctrl nodes */ |
988 | 988 | ||
989 | dev_t cdev_region; | 989 | dev_t cdev_region; |
@@ -1289,11 +1289,11 @@ static inline u32 get_field(u32 reg, u32 mask) | |||
1289 | /* invalidate channel lookup tlb */ | 1289 | /* invalidate channel lookup tlb */ |
1290 | static inline void gk20a_gr_flush_channel_tlb(struct gr_gk20a *gr) | 1290 | static inline void gk20a_gr_flush_channel_tlb(struct gr_gk20a *gr) |
1291 | { | 1291 | { |
1292 | spin_lock(&gr->ch_tlb_lock); | 1292 | nvgpu_spinlock_acquire(&gr->ch_tlb_lock); |
1293 | memset(gr->chid_tlb, 0, | 1293 | memset(gr->chid_tlb, 0, |
1294 | sizeof(struct gr_channel_map_tlb_entry) * | 1294 | sizeof(struct gr_channel_map_tlb_entry) * |
1295 | GR_CHANNEL_MAP_TLB_SIZE); | 1295 | GR_CHANNEL_MAP_TLB_SIZE); |
1296 | spin_unlock(&gr->ch_tlb_lock); | 1296 | nvgpu_spinlock_release(&gr->ch_tlb_lock); |
1297 | } | 1297 | } |
1298 | 1298 | ||
1299 | /* classes that the device supports */ | 1299 | /* classes that the device supports */ |
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c index d3b91a50..aad6c07b 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c | |||
@@ -538,7 +538,7 @@ int gr_gk20a_submit_fecs_method_op(struct gk20a *g, | |||
538 | struct gr_gk20a *gr = &g->gr; | 538 | struct gr_gk20a *gr = &g->gr; |
539 | int ret; | 539 | int ret; |
540 | 540 | ||
541 | mutex_lock(&gr->fecs_mutex); | 541 | nvgpu_mutex_acquire(&gr->fecs_mutex); |
542 | 542 | ||
543 | if (op.mailbox.id != 0) | 543 | if (op.mailbox.id != 0) |
544 | gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(op.mailbox.id), | 544 | gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(op.mailbox.id), |
@@ -561,7 +561,7 @@ int gr_gk20a_submit_fecs_method_op(struct gk20a *g, | |||
561 | op.cond.fail, op.mailbox.fail, | 561 | op.cond.fail, op.mailbox.fail, |
562 | sleepduringwait); | 562 | sleepduringwait); |
563 | 563 | ||
564 | mutex_unlock(&gr->fecs_mutex); | 564 | nvgpu_mutex_release(&gr->fecs_mutex); |
565 | 565 | ||
566 | return ret; | 566 | return ret; |
567 | } | 567 | } |
@@ -573,7 +573,7 @@ int gr_gk20a_submit_fecs_sideband_method_op(struct gk20a *g, | |||
573 | struct gr_gk20a *gr = &g->gr; | 573 | struct gr_gk20a *gr = &g->gr; |
574 | int ret; | 574 | int ret; |
575 | 575 | ||
576 | mutex_lock(&gr->fecs_mutex); | 576 | nvgpu_mutex_acquire(&gr->fecs_mutex); |
577 | 577 | ||
578 | gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(op.mailbox.id), | 578 | gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(op.mailbox.id), |
579 | gr_fecs_ctxsw_mailbox_clear_value_f(op.mailbox.clr)); | 579 | gr_fecs_ctxsw_mailbox_clear_value_f(op.mailbox.clr)); |
@@ -587,7 +587,7 @@ int gr_gk20a_submit_fecs_sideband_method_op(struct gk20a *g, | |||
587 | op.cond.fail, op.mailbox.fail, | 587 | op.cond.fail, op.mailbox.fail, |
588 | false); | 588 | false); |
589 | 589 | ||
590 | mutex_unlock(&gr->fecs_mutex); | 590 | nvgpu_mutex_release(&gr->fecs_mutex); |
591 | 591 | ||
592 | return ret; | 592 | return ret; |
593 | } | 593 | } |
@@ -1596,7 +1596,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g, | |||
1596 | /* golden ctx is global to all channels. Although only the first | 1596 | /* golden ctx is global to all channels. Although only the first |
1597 | channel initializes golden image, driver needs to prevent multiple | 1597 | channel initializes golden image, driver needs to prevent multiple |
1598 | channels from initializing golden ctx at the same time */ | 1598 | channels from initializing golden ctx at the same time */ |
1599 | mutex_lock(&gr->ctx_mutex); | 1599 | nvgpu_mutex_acquire(&gr->ctx_mutex); |
1600 | 1600 | ||
1601 | if (gr->ctx_vars.golden_image_initialized) { | 1601 | if (gr->ctx_vars.golden_image_initialized) { |
1602 | goto clean_up; | 1602 | goto clean_up; |
@@ -1825,7 +1825,7 @@ clean_up: | |||
1825 | gk20a_mem_end(g, gold_mem); | 1825 | gk20a_mem_end(g, gold_mem); |
1826 | gk20a_mem_end(g, gr_mem); | 1826 | gk20a_mem_end(g, gr_mem); |
1827 | 1827 | ||
1828 | mutex_unlock(&gr->ctx_mutex); | 1828 | nvgpu_mutex_release(&gr->ctx_mutex); |
1829 | return err; | 1829 | return err; |
1830 | } | 1830 | } |
1831 | 1831 | ||
@@ -3327,7 +3327,7 @@ out: | |||
3327 | int gk20a_comptag_allocator_init(struct gk20a_comptag_allocator *allocator, | 3327 | int gk20a_comptag_allocator_init(struct gk20a_comptag_allocator *allocator, |
3328 | unsigned long size) | 3328 | unsigned long size) |
3329 | { | 3329 | { |
3330 | mutex_init(&allocator->lock); | 3330 | nvgpu_mutex_init(&allocator->lock); |
3331 | /* | 3331 | /* |
3332 | * 0th comptag is special and is never used. The base for this bitmap | 3332 | * 0th comptag is special and is never used. The base for this bitmap |
3333 | * is 1, and its size is one less than the size of comptag store. | 3333 | * is 1, and its size is one less than the size of comptag store. |
@@ -4064,7 +4064,7 @@ int gr_gk20a_add_zbc(struct gk20a *g, struct gr_gk20a *gr, | |||
4064 | 4064 | ||
4065 | /* no endian swap ? */ | 4065 | /* no endian swap ? */ |
4066 | 4066 | ||
4067 | mutex_lock(&gr->zbc_lock); | 4067 | nvgpu_mutex_acquire(&gr->zbc_lock); |
4068 | switch (zbc_val->type) { | 4068 | switch (zbc_val->type) { |
4069 | case GK20A_ZBC_TYPE_COLOR: | 4069 | case GK20A_ZBC_TYPE_COLOR: |
4070 | /* search existing tables */ | 4070 | /* search existing tables */ |
@@ -4159,7 +4159,7 @@ int gr_gk20a_add_zbc(struct gk20a *g, struct gr_gk20a *gr, | |||
4159 | } | 4159 | } |
4160 | 4160 | ||
4161 | err_mutex: | 4161 | err_mutex: |
4162 | mutex_unlock(&gr->zbc_lock); | 4162 | nvgpu_mutex_release(&gr->zbc_lock); |
4163 | return ret; | 4163 | return ret; |
4164 | } | 4164 | } |
4165 | 4165 | ||
@@ -4267,7 +4267,7 @@ int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr) | |||
4267 | struct zbc_entry zbc_val; | 4267 | struct zbc_entry zbc_val; |
4268 | u32 i, err; | 4268 | u32 i, err; |
4269 | 4269 | ||
4270 | mutex_init(&gr->zbc_lock); | 4270 | nvgpu_mutex_init(&gr->zbc_lock); |
4271 | 4271 | ||
4272 | /* load default color table */ | 4272 | /* load default color table */ |
4273 | zbc_val.type = GK20A_ZBC_TYPE_COLOR; | 4273 | zbc_val.type = GK20A_ZBC_TYPE_COLOR; |
@@ -5136,7 +5136,7 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g) | |||
5136 | gr->g = g; | 5136 | gr->g = g; |
5137 | 5137 | ||
5138 | #if defined(CONFIG_GK20A_CYCLE_STATS) | 5138 | #if defined(CONFIG_GK20A_CYCLE_STATS) |
5139 | mutex_init(&g->gr.cs_lock); | 5139 | nvgpu_mutex_init(&g->gr.cs_lock); |
5140 | #endif | 5140 | #endif |
5141 | 5141 | ||
5142 | err = gr_gk20a_init_gr_config(g, gr); | 5142 | err = gr_gk20a_init_gr_config(g, gr); |
@@ -5172,8 +5172,8 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g) | |||
5172 | 5172 | ||
5173 | gr_gk20a_load_zbc_default_table(g, gr); | 5173 | gr_gk20a_load_zbc_default_table(g, gr); |
5174 | 5174 | ||
5175 | mutex_init(&gr->ctx_mutex); | 5175 | nvgpu_mutex_init(&gr->ctx_mutex); |
5176 | spin_lock_init(&gr->ch_tlb_lock); | 5176 | nvgpu_spinlock_init(&gr->ch_tlb_lock); |
5177 | 5177 | ||
5178 | gr->remove_support = gk20a_remove_gr_support; | 5178 | gr->remove_support = gk20a_remove_gr_support; |
5179 | gr->sw_ready = true; | 5179 | gr->sw_ready = true; |
@@ -5244,7 +5244,7 @@ int gk20a_init_gr_support(struct gk20a *g) | |||
5244 | gk20a_dbg_fn(""); | 5244 | gk20a_dbg_fn(""); |
5245 | 5245 | ||
5246 | /* this is required before gr_gk20a_init_ctx_state */ | 5246 | /* this is required before gr_gk20a_init_ctx_state */ |
5247 | mutex_init(&g->gr.fecs_mutex); | 5247 | nvgpu_mutex_init(&g->gr.fecs_mutex); |
5248 | 5248 | ||
5249 | err = gr_gk20a_init_ctxsw(g); | 5249 | err = gr_gk20a_init_ctxsw(g); |
5250 | if (err) | 5250 | if (err) |
@@ -5468,7 +5468,7 @@ int gk20a_gr_reset(struct gk20a *g) | |||
5468 | int err; | 5468 | int err; |
5469 | u32 size; | 5469 | u32 size; |
5470 | 5470 | ||
5471 | mutex_lock(&g->gr.fecs_mutex); | 5471 | nvgpu_mutex_acquire(&g->gr.fecs_mutex); |
5472 | 5472 | ||
5473 | err = gk20a_enable_gr_hw(g); | 5473 | err = gk20a_enable_gr_hw(g); |
5474 | if (err) | 5474 | if (err) |
@@ -5482,7 +5482,7 @@ int gk20a_gr_reset(struct gk20a *g) | |||
5482 | if (err) | 5482 | if (err) |
5483 | return err; | 5483 | return err; |
5484 | 5484 | ||
5485 | mutex_unlock(&g->gr.fecs_mutex); | 5485 | nvgpu_mutex_release(&g->gr.fecs_mutex); |
5486 | 5486 | ||
5487 | /* this appears to query sw states but fecs actually inits | 5487 | /* this appears to query sw states but fecs actually inits |
5488 | ramchain, etc., so this is hw init */ | 5488 | ramchain, etc., so this is hw init */ |
@@ -5731,7 +5731,7 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g, | |||
5731 | if ((ch->cyclestate.cyclestate_buffer == NULL) || (isr_data->data_lo == 0)) | 5731 | if ((ch->cyclestate.cyclestate_buffer == NULL) || (isr_data->data_lo == 0)) |
5732 | return 0; | 5732 | return 0; |
5733 | 5733 | ||
5734 | mutex_lock(&ch->cyclestate.cyclestate_buffer_mutex); | 5734 | nvgpu_mutex_acquire(&ch->cyclestate.cyclestate_buffer_mutex); |
5735 | 5735 | ||
5736 | virtual_address = ch->cyclestate.cyclestate_buffer; | 5736 | virtual_address = ch->cyclestate.cyclestate_buffer; |
5737 | buffer_size = ch->cyclestate.cyclestate_buffer_size; | 5737 | buffer_size = ch->cyclestate.cyclestate_buffer_size; |
@@ -5843,7 +5843,7 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g, | |||
5843 | sh_hdr->completed = true; | 5843 | sh_hdr->completed = true; |
5844 | offset += sh_hdr->size; | 5844 | offset += sh_hdr->size; |
5845 | } | 5845 | } |
5846 | mutex_unlock(&ch->cyclestate.cyclestate_buffer_mutex); | 5846 | nvgpu_mutex_release(&ch->cyclestate.cyclestate_buffer_mutex); |
5847 | #endif | 5847 | #endif |
5848 | gk20a_dbg_fn(""); | 5848 | gk20a_dbg_fn(""); |
5849 | wake_up(&ch->notifier_wq); | 5849 | wake_up(&ch->notifier_wq); |
@@ -5874,7 +5874,7 @@ static struct channel_gk20a *gk20a_gr_get_channel_from_ctx( | |||
5874 | if (!gr_fecs_current_ctx_valid_v(curr_ctx)) | 5874 | if (!gr_fecs_current_ctx_valid_v(curr_ctx)) |
5875 | return NULL; | 5875 | return NULL; |
5876 | 5876 | ||
5877 | spin_lock(&gr->ch_tlb_lock); | 5877 | nvgpu_spinlock_acquire(&gr->ch_tlb_lock); |
5878 | 5878 | ||
5879 | /* check cache first */ | 5879 | /* check cache first */ |
5880 | for (i = 0; i < GR_CHANNEL_MAP_TLB_SIZE; i++) { | 5880 | for (i = 0; i < GR_CHANNEL_MAP_TLB_SIZE; i++) { |
@@ -5926,7 +5926,7 @@ static struct channel_gk20a *gk20a_gr_get_channel_from_ctx( | |||
5926 | (GR_CHANNEL_MAP_TLB_SIZE - 1); | 5926 | (GR_CHANNEL_MAP_TLB_SIZE - 1); |
5927 | 5927 | ||
5928 | unlock: | 5928 | unlock: |
5929 | spin_unlock(&gr->ch_tlb_lock); | 5929 | nvgpu_spinlock_release(&gr->ch_tlb_lock); |
5930 | if (curr_tsgid) | 5930 | if (curr_tsgid) |
5931 | *curr_tsgid = tsgid; | 5931 | *curr_tsgid = tsgid; |
5932 | return ret; | 5932 | return ret; |
@@ -5998,7 +5998,7 @@ static int gk20a_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc) | |||
5998 | GPU_LIT_TPC_IN_GPC_STRIDE); | 5998 | GPU_LIT_TPC_IN_GPC_STRIDE); |
5999 | u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc; | 5999 | u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc; |
6000 | 6000 | ||
6001 | mutex_lock(&g->dbg_sessions_lock); | 6001 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
6002 | 6002 | ||
6003 | sm_id = gr_gpc0_tpc0_sm_cfg_sm_id_v(gk20a_readl(g, | 6003 | sm_id = gr_gpc0_tpc0_sm_cfg_sm_id_v(gk20a_readl(g, |
6004 | gr_gpc0_tpc0_sm_cfg_r() + offset)); | 6004 | gr_gpc0_tpc0_sm_cfg_r() + offset)); |
@@ -6012,7 +6012,7 @@ static int gk20a_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc) | |||
6012 | gr->sm_error_states[sm_id].hww_warp_esr_report_mask = gk20a_readl(g, | 6012 | gr->sm_error_states[sm_id].hww_warp_esr_report_mask = gk20a_readl(g, |
6013 | gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_r() + offset); | 6013 | gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_r() + offset); |
6014 | 6014 | ||
6015 | mutex_unlock(&g->dbg_sessions_lock); | 6015 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
6016 | 6016 | ||
6017 | return 0; | 6017 | return 0; |
6018 | } | 6018 | } |
@@ -6029,7 +6029,7 @@ static int gk20a_gr_update_sm_error_state(struct gk20a *g, | |||
6029 | GPU_LIT_TPC_IN_GPC_STRIDE); | 6029 | GPU_LIT_TPC_IN_GPC_STRIDE); |
6030 | int err = 0; | 6030 | int err = 0; |
6031 | 6031 | ||
6032 | mutex_lock(&g->dbg_sessions_lock); | 6032 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
6033 | 6033 | ||
6034 | gr->sm_error_states[sm_id].hww_global_esr = | 6034 | gr->sm_error_states[sm_id].hww_global_esr = |
6035 | sm_error_state->hww_global_esr; | 6035 | sm_error_state->hww_global_esr; |
@@ -6081,7 +6081,7 @@ enable_ctxsw: | |||
6081 | err = gr_gk20a_enable_ctxsw(g); | 6081 | err = gr_gk20a_enable_ctxsw(g); |
6082 | 6082 | ||
6083 | fail: | 6083 | fail: |
6084 | mutex_unlock(&g->dbg_sessions_lock); | 6084 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
6085 | return err; | 6085 | return err; |
6086 | } | 6086 | } |
6087 | 6087 | ||
@@ -6096,7 +6096,7 @@ static int gk20a_gr_clear_sm_error_state(struct gk20a *g, | |||
6096 | GPU_LIT_TPC_IN_GPC_STRIDE); | 6096 | GPU_LIT_TPC_IN_GPC_STRIDE); |
6097 | int err = 0; | 6097 | int err = 0; |
6098 | 6098 | ||
6099 | mutex_lock(&g->dbg_sessions_lock); | 6099 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
6100 | 6100 | ||
6101 | memset(&gr->sm_error_states[sm_id], 0, sizeof(*gr->sm_error_states)); | 6101 | memset(&gr->sm_error_states[sm_id], 0, sizeof(*gr->sm_error_states)); |
6102 | 6102 | ||
@@ -6122,7 +6122,7 @@ static int gk20a_gr_clear_sm_error_state(struct gk20a *g, | |||
6122 | err = gr_gk20a_enable_ctxsw(g); | 6122 | err = gr_gk20a_enable_ctxsw(g); |
6123 | 6123 | ||
6124 | fail: | 6124 | fail: |
6125 | mutex_unlock(&g->dbg_sessions_lock); | 6125 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
6126 | return err; | 6126 | return err; |
6127 | } | 6127 | } |
6128 | 6128 | ||
@@ -9128,7 +9128,7 @@ int gr_gk20a_suspend_contexts(struct gk20a *g, | |||
9128 | struct dbg_session_channel_data *ch_data; | 9128 | struct dbg_session_channel_data *ch_data; |
9129 | int err = 0; | 9129 | int err = 0; |
9130 | 9130 | ||
9131 | mutex_lock(&g->dbg_sessions_lock); | 9131 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
9132 | 9132 | ||
9133 | err = gr_gk20a_disable_ctxsw(g); | 9133 | err = gr_gk20a_disable_ctxsw(g); |
9134 | if (err) { | 9134 | if (err) { |
@@ -9136,7 +9136,7 @@ int gr_gk20a_suspend_contexts(struct gk20a *g, | |||
9136 | goto clean_up; | 9136 | goto clean_up; |
9137 | } | 9137 | } |
9138 | 9138 | ||
9139 | mutex_lock(&dbg_s->ch_list_lock); | 9139 | nvgpu_mutex_acquire(&dbg_s->ch_list_lock); |
9140 | 9140 | ||
9141 | list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry) { | 9141 | list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry) { |
9142 | ch = g->fifo.channel + ch_data->chid; | 9142 | ch = g->fifo.channel + ch_data->chid; |
@@ -9146,7 +9146,7 @@ int gr_gk20a_suspend_contexts(struct gk20a *g, | |||
9146 | local_ctx_resident_ch_fd = ch_data->channel_fd; | 9146 | local_ctx_resident_ch_fd = ch_data->channel_fd; |
9147 | } | 9147 | } |
9148 | 9148 | ||
9149 | mutex_unlock(&dbg_s->ch_list_lock); | 9149 | nvgpu_mutex_release(&dbg_s->ch_list_lock); |
9150 | 9150 | ||
9151 | err = gr_gk20a_enable_ctxsw(g); | 9151 | err = gr_gk20a_enable_ctxsw(g); |
9152 | if (err) | 9152 | if (err) |
@@ -9155,7 +9155,7 @@ int gr_gk20a_suspend_contexts(struct gk20a *g, | |||
9155 | *ctx_resident_ch_fd = local_ctx_resident_ch_fd; | 9155 | *ctx_resident_ch_fd = local_ctx_resident_ch_fd; |
9156 | 9156 | ||
9157 | clean_up: | 9157 | clean_up: |
9158 | mutex_unlock(&g->dbg_sessions_lock); | 9158 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
9159 | 9159 | ||
9160 | return err; | 9160 | return err; |
9161 | } | 9161 | } |
@@ -9170,7 +9170,7 @@ int gr_gk20a_resume_contexts(struct gk20a *g, | |||
9170 | int err = 0; | 9170 | int err = 0; |
9171 | struct dbg_session_channel_data *ch_data; | 9171 | struct dbg_session_channel_data *ch_data; |
9172 | 9172 | ||
9173 | mutex_lock(&g->dbg_sessions_lock); | 9173 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
9174 | 9174 | ||
9175 | err = gr_gk20a_disable_ctxsw(g); | 9175 | err = gr_gk20a_disable_ctxsw(g); |
9176 | if (err) { | 9176 | if (err) { |
@@ -9193,7 +9193,7 @@ int gr_gk20a_resume_contexts(struct gk20a *g, | |||
9193 | *ctx_resident_ch_fd = local_ctx_resident_ch_fd; | 9193 | *ctx_resident_ch_fd = local_ctx_resident_ch_fd; |
9194 | 9194 | ||
9195 | clean_up: | 9195 | clean_up: |
9196 | mutex_unlock(&g->dbg_sessions_lock); | 9196 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
9197 | 9197 | ||
9198 | return err; | 9198 | return err; |
9199 | } | 9199 | } |
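
Every hunk above applies one mechanical pattern: the Linux locking calls in gr_gk20a.c (mutex_init/mutex_lock/mutex_unlock, spin_lock_init/spin_lock/spin_unlock) are replaced by the corresponding nvgpu wrappers, so the locking code no longer invokes Linux primitives directly. As a rough sketch of what the Linux backend of that wrapper could look like — the real definitions live elsewhere in the nvgpu tree, and the actual nvgpu_mutex_init may differ (it could return an error code, for instance) — the mapping is a thin one-to-one shim:

#include <linux/mutex.h>

/* Sketch only: illustrates the assumed one-to-one mapping onto the
 * Linux mutex; not the authoritative nvgpu definition. */
struct nvgpu_mutex {
	struct mutex mutex;		/* wrapped Linux mutex */
};

static inline void nvgpu_mutex_init(struct nvgpu_mutex *mutex)
{
	mutex_init(&mutex->mutex);
}

static inline void nvgpu_mutex_acquire(struct nvgpu_mutex *mutex)
{
	mutex_lock(&mutex->mutex);
}

static inline void nvgpu_mutex_release(struct nvgpu_mutex *mutex)
{
	mutex_unlock(&mutex->mutex);
}

Because the shim is one-to-one, the conversion preserves locking semantics exactly; only the spelling changes.
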
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h index 5a987a82..2dd1eaf5 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h | |||
@@ -275,8 +275,8 @@ struct gr_gk20a { | |||
275 | bool valid; | 275 | bool valid; |
276 | } ctx_vars; | 276 | } ctx_vars; |
277 | 277 | ||
278 | struct mutex ctx_mutex; /* protect golden ctx init */ | 278 | struct nvgpu_mutex ctx_mutex; /* protect golden ctx init */ |
279 | struct mutex fecs_mutex; /* protect fecs method */ | 279 | struct nvgpu_mutex fecs_mutex; /* protect fecs method */ |
280 | 280 | ||
281 | #define GR_NETLIST_DYNAMIC -1 | 281 | #define GR_NETLIST_DYNAMIC -1 |
282 | #define GR_NETLIST_STATIC_A 'A' | 282 | #define GR_NETLIST_STATIC_A 'A' |
@@ -333,7 +333,7 @@ struct gr_gk20a { | |||
333 | u32 max_comptag_mem; /* max memory size (MB) for comptag */ | 333 | u32 max_comptag_mem; /* max memory size (MB) for comptag */ |
334 | struct compbit_store_desc compbit_store; | 334 | struct compbit_store_desc compbit_store; |
335 | struct gk20a_comptag_allocator { | 335 | struct gk20a_comptag_allocator { |
336 | struct mutex lock; | 336 | struct nvgpu_mutex lock; |
337 | /* this bitmap starts at ctag 1. 0th cannot be taken */ | 337 | /* this bitmap starts at ctag 1. 0th cannot be taken */ |
338 | unsigned long *bitmap; | 338 | unsigned long *bitmap; |
339 | /* size of bitmap, not max ctags, so one less */ | 339 | /* size of bitmap, not max ctags, so one less */ |
@@ -342,7 +342,7 @@ struct gr_gk20a { | |||
342 | 342 | ||
343 | struct gr_zcull_gk20a zcull; | 343 | struct gr_zcull_gk20a zcull; |
344 | 344 | ||
345 | struct mutex zbc_lock; | 345 | struct nvgpu_mutex zbc_lock; |
346 | struct zbc_color_table zbc_col_tbl[GK20A_ZBC_TABLE_SIZE]; | 346 | struct zbc_color_table zbc_col_tbl[GK20A_ZBC_TABLE_SIZE]; |
347 | struct zbc_depth_table zbc_dep_tbl[GK20A_ZBC_TABLE_SIZE]; | 347 | struct zbc_depth_table zbc_dep_tbl[GK20A_ZBC_TABLE_SIZE]; |
348 | #ifdef CONFIG_TEGRA_19x_GPU | 348 | #ifdef CONFIG_TEGRA_19x_GPU |
@@ -363,7 +363,7 @@ struct gr_gk20a { | |||
363 | #define GR_CHANNEL_MAP_TLB_SIZE 2 /* must be a power of 2 */ | 363 | #define GR_CHANNEL_MAP_TLB_SIZE 2 /* must be a power of 2 */ |
364 | struct gr_channel_map_tlb_entry chid_tlb[GR_CHANNEL_MAP_TLB_SIZE]; | 364 | struct gr_channel_map_tlb_entry chid_tlb[GR_CHANNEL_MAP_TLB_SIZE]; |
365 | u32 channel_tlb_flush_index; | 365 | u32 channel_tlb_flush_index; |
366 | spinlock_t ch_tlb_lock; | 366 | struct nvgpu_spinlock ch_tlb_lock; |
367 | 367 | ||
368 | void (*remove_support)(struct gr_gk20a *gr); | 368 | void (*remove_support)(struct gr_gk20a *gr); |
369 | bool sw_ready; | 369 | bool sw_ready; |
@@ -379,7 +379,7 @@ struct gr_gk20a { | |||
379 | struct sm_info *sm_to_cluster; | 379 | struct sm_info *sm_to_cluster; |
380 | struct nvgpu_dbg_gpu_sm_error_state_record *sm_error_states; | 380 | struct nvgpu_dbg_gpu_sm_error_state_record *sm_error_states; |
381 | #if defined(CONFIG_GK20A_CYCLE_STATS) | 381 | #if defined(CONFIG_GK20A_CYCLE_STATS) |
382 | struct mutex cs_lock; | 382 | struct nvgpu_mutex cs_lock; |
383 | struct gk20a_cs_snapshot *cs_data; | 383 | struct gk20a_cs_snapshot *cs_data; |
384 | #endif | 384 | #endif |
385 | }; | 385 | }; |
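
The header side of the same conversion: the struct mutex and spinlock_t members of struct gr_gk20a become struct nvgpu_mutex and struct nvgpu_spinlock, matching the call sites above. Under the same assumption that Linux primitives back the nvgpu types, the spinlock wrapper would look analogous (again a sketch, not the authoritative definition):

#include <linux/spinlock.h>

struct nvgpu_spinlock {
	spinlock_t spinlock;		/* wrapped Linux spinlock */
};

static inline void nvgpu_spinlock_init(struct nvgpu_spinlock *spinlock)
{
	spin_lock_init(&spinlock->spinlock);
}

static inline void nvgpu_spinlock_acquire(struct nvgpu_spinlock *spinlock)
{
	spin_lock(&spinlock->spinlock);
}

static inline void nvgpu_spinlock_release(struct nvgpu_spinlock *spinlock)
{
	spin_unlock(&spinlock->spinlock);
}

Note that each lock keeps its kind: ch_tlb_lock stays a spinlock and the rest stay mutexes, so the sleeping/non-sleeping behavior of every lock is unchanged.
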
diff --git a/drivers/gpu/nvgpu/gk20a/ltc_common.c b/drivers/gpu/nvgpu/gk20a/ltc_common.c index e1c23f79..13819872 100644 --- a/drivers/gpu/nvgpu/gk20a/ltc_common.c +++ b/drivers/gpu/nvgpu/gk20a/ltc_common.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * GK20A Graphics | 4 | * GK20A Graphics |
5 | * | 5 | * |
6 | * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. | 6 | * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify it | 8 | * This program is free software; you can redistribute it and/or modify it |
9 | * under the terms and conditions of the GNU General Public License, | 9 | * under the terms and conditions of the GNU General Public License, |
@@ -138,7 +138,7 @@ static void gk20a_ltc_sync_debugfs(struct gk20a *g) | |||
138 | { | 138 | { |
139 | u32 reg_f = ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f(); | 139 | u32 reg_f = ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f(); |
140 | 140 | ||
141 | spin_lock(&g->debugfs_lock); | 141 | nvgpu_spinlock_acquire(&g->debugfs_lock); |
142 | if (g->mm.ltc_enabled != g->mm.ltc_enabled_debug) { | 142 | if (g->mm.ltc_enabled != g->mm.ltc_enabled_debug) { |
143 | u32 reg = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r()); | 143 | u32 reg = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r()); |
144 | if (g->mm.ltc_enabled_debug) | 144 | if (g->mm.ltc_enabled_debug) |
@@ -151,6 +151,6 @@ static void gk20a_ltc_sync_debugfs(struct gk20a *g) | |||
151 | gk20a_writel(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r(), reg); | 151 | gk20a_writel(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r(), reg); |
152 | g->mm.ltc_enabled = g->mm.ltc_enabled_debug; | 152 | g->mm.ltc_enabled = g->mm.ltc_enabled_debug; |
153 | } | 153 | } |
154 | spin_unlock(&g->debugfs_lock); | 154 | nvgpu_spinlock_release(&g->debugfs_lock); |
155 | } | 155 | } |
156 | #endif | 156 | #endif |
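
The debugfs sync above now takes g->debugfs_lock through the spinlock wrapper. A debugfs write handler toggling the L2 enable knob would be expected to set mm.ltc_enabled_debug and then call the sync, e.g. (hypothetical caller; the debugfs write path itself is not part of this hunk):

/* Hypothetical caller: record the requested state, then let
 * gk20a_ltc_sync_debugfs() apply it to the hardware under
 * g->debugfs_lock. */
static void gk20a_ltc_set_enabled(struct gk20a *g, bool enabled)
{
	g->mm.ltc_enabled_debug = enabled;
	gk20a_ltc_sync_debugfs(g);
}
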
diff --git a/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c b/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c index 103952ca..5db48ae7 100644 --- a/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * GK20A L2 | 2 | * GK20A L2 |
3 | * | 3 | * |
4 | * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. | 4 | * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms and conditions of the GNU General Public License, | 7 | * under the terms and conditions of the GNU General Public License, |
@@ -120,7 +120,7 @@ static int gk20a_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op, | |||
120 | if (gr->compbit_store.mem.size == 0) | 120 | if (gr->compbit_store.mem.size == 0) |
121 | return 0; | 121 | return 0; |
122 | 122 | ||
123 | mutex_lock(&g->mm.l2_op_lock); | 123 | nvgpu_mutex_acquire(&g->mm.l2_op_lock); |
124 | 124 | ||
125 | if (op == gk20a_cbc_op_clear) { | 125 | if (op == gk20a_cbc_op_clear) { |
126 | gk20a_writel(g, ltc_ltcs_ltss_cbc_ctrl2_r(), | 126 | gk20a_writel(g, ltc_ltcs_ltss_cbc_ctrl2_r(), |
@@ -168,7 +168,7 @@ static int gk20a_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op, | |||
168 | } | 168 | } |
169 | out: | 169 | out: |
170 | trace_gk20a_ltc_cbc_ctrl_done(dev_name(g->dev)); | 170 | trace_gk20a_ltc_cbc_ctrl_done(dev_name(g->dev)); |
171 | mutex_unlock(&g->mm.l2_op_lock); | 171 | nvgpu_mutex_release(&g->mm.l2_op_lock); |
172 | return err; | 172 | return err; |
173 | } | 173 | } |
174 | 174 | ||
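
gk20a_ltc_cbc_ctrl shows the other recurring shape in this patch: a single acquire at the top, every exit path funneled through the out: label, and exactly one release there, so the mutex_lock/mutex_unlock conversion touches only one line each per function. Reduced to a skeleton using the wrapper sketched earlier (the function name and error condition here are illustrative):

#include <linux/errno.h>

static int cbc_ctrl_skeleton(struct nvgpu_mutex *l2_op_lock, bool timed_out)
{
	int err = 0;

	nvgpu_mutex_acquire(l2_op_lock);

	if (timed_out) {
		err = -EBUSY;
		goto out;	/* error paths still reach the release */
	}

	/* ... program the CBC control registers, poll for completion ... */

out:
	nvgpu_mutex_release(l2_op_lock);	/* exactly one release site */
	return err;
}
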
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c index c62d1f6c..2539138a 100644 --- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c | |||
@@ -142,7 +142,7 @@ static u32 gk20a_pramin_enter(struct gk20a *g, struct mem_desc *mem, | |||
142 | 142 | ||
143 | WARN_ON(!bufbase); | 143 | WARN_ON(!bufbase); |
144 | 144 | ||
145 | spin_lock(&g->mm.pramin_window_lock); | 145 | nvgpu_spinlock_acquire(&g->mm.pramin_window_lock); |
146 | 146 | ||
147 | if (g->mm.pramin_window != win) { | 147 | if (g->mm.pramin_window != win) { |
148 | gk20a_writel(g, bus_bar0_window_r(), win); | 148 | gk20a_writel(g, bus_bar0_window_r(), win); |
@@ -158,7 +158,7 @@ static void gk20a_pramin_exit(struct gk20a *g, struct mem_desc *mem, | |||
158 | { | 158 | { |
159 | gk20a_dbg(gpu_dbg_mem, "end for %p,%p", mem, chunk); | 159 | gk20a_dbg(gpu_dbg_mem, "end for %p,%p", mem, chunk); |
160 | 160 | ||
161 | spin_unlock(&g->mm.pramin_window_lock); | 161 | nvgpu_spinlock_release(&g->mm.pramin_window_lock); |
162 | } | 162 | } |
163 | 163 | ||
164 | /* | 164 | /* |
@@ -483,7 +483,7 @@ static int __must_check gk20a_init_ce_vm(struct mm_gk20a *mm); | |||
483 | static struct gk20a *gk20a_vidmem_buf_owner(struct dma_buf *dmabuf); | 483 | static struct gk20a *gk20a_vidmem_buf_owner(struct dma_buf *dmabuf); |
484 | 484 | ||
485 | struct gk20a_dmabuf_priv { | 485 | struct gk20a_dmabuf_priv { |
486 | struct mutex lock; | 486 | struct nvgpu_mutex lock; |
487 | 487 | ||
488 | struct gk20a_comptag_allocator *comptag_allocator; | 488 | struct gk20a_comptag_allocator *comptag_allocator; |
489 | struct gk20a_comptags comptags; | 489 | struct gk20a_comptags comptags; |
@@ -514,7 +514,7 @@ static int gk20a_comptaglines_alloc(struct gk20a_comptag_allocator *allocator, | |||
514 | unsigned long addr; | 514 | unsigned long addr; |
515 | int err = 0; | 515 | int err = 0; |
516 | 516 | ||
517 | mutex_lock(&allocator->lock); | 517 | nvgpu_mutex_acquire(&allocator->lock); |
518 | addr = bitmap_find_next_zero_area(allocator->bitmap, allocator->size, | 518 | addr = bitmap_find_next_zero_area(allocator->bitmap, allocator->size, |
519 | 0, len, 0); | 519 | 0, len, 0); |
520 | if (addr < allocator->size) { | 520 | if (addr < allocator->size) { |
@@ -524,7 +524,7 @@ static int gk20a_comptaglines_alloc(struct gk20a_comptag_allocator *allocator, | |||
524 | } else { | 524 | } else { |
525 | err = -ENOMEM; | 525 | err = -ENOMEM; |
526 | } | 526 | } |
527 | mutex_unlock(&allocator->lock); | 527 | nvgpu_mutex_release(&allocator->lock); |
528 | 528 | ||
529 | return err; | 529 | return err; |
530 | } | 530 | } |
@@ -538,9 +538,9 @@ static void gk20a_comptaglines_free(struct gk20a_comptag_allocator *allocator, | |||
538 | WARN_ON(addr > allocator->size); | 538 | WARN_ON(addr > allocator->size); |
539 | WARN_ON(addr + len > allocator->size); | 539 | WARN_ON(addr + len > allocator->size); |
540 | 540 | ||
541 | mutex_lock(&allocator->lock); | 541 | nvgpu_mutex_acquire(&allocator->lock); |
542 | bitmap_clear(allocator->bitmap, addr, len); | 542 | bitmap_clear(allocator->bitmap, addr, len); |
543 | mutex_unlock(&allocator->lock); | 543 | nvgpu_mutex_release(&allocator->lock); |
544 | } | 544 | } |
545 | 545 | ||
546 | static void gk20a_mm_delete_priv(void *_priv) | 546 | static void gk20a_mm_delete_priv(void *_priv) |
@@ -575,12 +575,12 @@ struct sg_table *gk20a_mm_pin(struct device *dev, struct dma_buf *dmabuf) | |||
575 | if (WARN_ON(!priv)) | 575 | if (WARN_ON(!priv)) |
576 | return ERR_PTR(-EINVAL); | 576 | return ERR_PTR(-EINVAL); |
577 | 577 | ||
578 | mutex_lock(&priv->lock); | 578 | nvgpu_mutex_acquire(&priv->lock); |
579 | 579 | ||
580 | if (priv->pin_count == 0) { | 580 | if (priv->pin_count == 0) { |
581 | priv->attach = dma_buf_attach(dmabuf, dev); | 581 | priv->attach = dma_buf_attach(dmabuf, dev); |
582 | if (IS_ERR(priv->attach)) { | 582 | if (IS_ERR(priv->attach)) { |
583 | mutex_unlock(&priv->lock); | 583 | nvgpu_mutex_release(&priv->lock); |
584 | return (struct sg_table *)priv->attach; | 584 | return (struct sg_table *)priv->attach; |
585 | } | 585 | } |
586 | 586 | ||
@@ -588,13 +588,13 @@ struct sg_table *gk20a_mm_pin(struct device *dev, struct dma_buf *dmabuf) | |||
588 | DMA_BIDIRECTIONAL); | 588 | DMA_BIDIRECTIONAL); |
589 | if (IS_ERR(priv->sgt)) { | 589 | if (IS_ERR(priv->sgt)) { |
590 | dma_buf_detach(dmabuf, priv->attach); | 590 | dma_buf_detach(dmabuf, priv->attach); |
591 | mutex_unlock(&priv->lock); | 591 | nvgpu_mutex_release(&priv->lock); |
592 | return priv->sgt; | 592 | return priv->sgt; |
593 | } | 593 | } |
594 | } | 594 | } |
595 | 595 | ||
596 | priv->pin_count++; | 596 | priv->pin_count++; |
597 | mutex_unlock(&priv->lock); | 597 | nvgpu_mutex_release(&priv->lock); |
598 | return priv->sgt; | 598 | return priv->sgt; |
599 | } | 599 | } |
600 | 600 | ||
@@ -607,7 +607,7 @@ void gk20a_mm_unpin(struct device *dev, struct dma_buf *dmabuf, | |||
607 | if (IS_ERR(priv) || !priv) | 607 | if (IS_ERR(priv) || !priv) |
608 | return; | 608 | return; |
609 | 609 | ||
610 | mutex_lock(&priv->lock); | 610 | nvgpu_mutex_acquire(&priv->lock); |
611 | WARN_ON(priv->sgt != sgt); | 611 | WARN_ON(priv->sgt != sgt); |
612 | priv->pin_count--; | 612 | priv->pin_count--; |
613 | WARN_ON(priv->pin_count < 0); | 613 | WARN_ON(priv->pin_count < 0); |
@@ -617,7 +617,7 @@ void gk20a_mm_unpin(struct device *dev, struct dma_buf *dmabuf, | |||
617 | DMA_BIDIRECTIONAL); | 617 | DMA_BIDIRECTIONAL); |
618 | dma_buf_detach(dmabuf, priv->attach); | 618 | dma_buf_detach(dmabuf, priv->attach); |
619 | } | 619 | } |
620 | mutex_unlock(&priv->lock); | 620 | nvgpu_mutex_release(&priv->lock); |
621 | } | 621 | } |
622 | 622 | ||
623 | void gk20a_get_comptags(struct device *dev, struct dma_buf *dmabuf, | 623 | void gk20a_get_comptags(struct device *dev, struct dma_buf *dmabuf, |
@@ -842,7 +842,7 @@ static int gk20a_alloc_sysmem_flush(struct gk20a *g) | |||
842 | static void gk20a_init_pramin(struct mm_gk20a *mm) | 842 | static void gk20a_init_pramin(struct mm_gk20a *mm) |
843 | { | 843 | { |
844 | mm->pramin_window = 0; | 844 | mm->pramin_window = 0; |
845 | spin_lock_init(&mm->pramin_window_lock); | 845 | nvgpu_spinlock_init(&mm->pramin_window_lock); |
846 | mm->force_pramin = GK20A_FORCE_PRAMIN_DEFAULT; | 846 | mm->force_pramin = GK20A_FORCE_PRAMIN_DEFAULT; |
847 | } | 847 | } |
848 | 848 | ||
@@ -971,12 +971,12 @@ static int gk20a_init_vidmem(struct mm_gk20a *mm) | |||
971 | mm->vidmem.bootstrap_base = bootstrap_base; | 971 | mm->vidmem.bootstrap_base = bootstrap_base; |
972 | mm->vidmem.bootstrap_size = bootstrap_size; | 972 | mm->vidmem.bootstrap_size = bootstrap_size; |
973 | 973 | ||
974 | mutex_init(&mm->vidmem.first_clear_mutex); | 974 | nvgpu_mutex_init(&mm->vidmem.first_clear_mutex); |
975 | 975 | ||
976 | INIT_WORK(&mm->vidmem.clear_mem_worker, gk20a_vidmem_clear_mem_worker); | 976 | INIT_WORK(&mm->vidmem.clear_mem_worker, gk20a_vidmem_clear_mem_worker); |
977 | atomic64_set(&mm->vidmem.bytes_pending, 0); | 977 | atomic64_set(&mm->vidmem.bytes_pending, 0); |
978 | INIT_LIST_HEAD(&mm->vidmem.clear_list_head); | 978 | INIT_LIST_HEAD(&mm->vidmem.clear_list_head); |
979 | mutex_init(&mm->vidmem.clear_list_mutex); | 979 | nvgpu_mutex_init(&mm->vidmem.clear_list_mutex); |
980 | 980 | ||
981 | gk20a_dbg_info("registered vidmem: %zu MB", size / SZ_1M); | 981 | gk20a_dbg_info("registered vidmem: %zu MB", size / SZ_1M); |
982 | 982 | ||
@@ -998,7 +998,7 @@ int gk20a_init_mm_setup_sw(struct gk20a *g) | |||
998 | } | 998 | } |
999 | 999 | ||
1000 | mm->g = g; | 1000 | mm->g = g; |
1001 | mutex_init(&mm->l2_op_lock); | 1001 | nvgpu_mutex_init(&mm->l2_op_lock); |
1002 | 1002 | ||
1003 | /*TBD: make channel vm size configurable */ | 1003 | /*TBD: make channel vm size configurable */ |
1004 | mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE - | 1004 | mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE - |
@@ -1484,12 +1484,12 @@ int gk20a_vm_get_buffers(struct vm_gk20a *vm, | |||
1484 | return 0; | 1484 | return 0; |
1485 | } | 1485 | } |
1486 | 1486 | ||
1487 | mutex_lock(&vm->update_gmmu_lock); | 1487 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); |
1488 | 1488 | ||
1489 | buffer_list = nvgpu_kalloc(sizeof(*buffer_list) * | 1489 | buffer_list = nvgpu_kalloc(sizeof(*buffer_list) * |
1490 | vm->num_user_mapped_buffers, true); | 1490 | vm->num_user_mapped_buffers, true); |
1491 | if (!buffer_list) { | 1491 | if (!buffer_list) { |
1492 | mutex_unlock(&vm->update_gmmu_lock); | 1492 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
1493 | return -ENOMEM; | 1493 | return -ENOMEM; |
1494 | } | 1494 | } |
1495 | 1495 | ||
@@ -1510,7 +1510,7 @@ int gk20a_vm_get_buffers(struct vm_gk20a *vm, | |||
1510 | *num_buffers = vm->num_user_mapped_buffers; | 1510 | *num_buffers = vm->num_user_mapped_buffers; |
1511 | *mapped_buffers = buffer_list; | 1511 | *mapped_buffers = buffer_list; |
1512 | 1512 | ||
1513 | mutex_unlock(&vm->update_gmmu_lock); | 1513 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
1514 | 1514 | ||
1515 | return 0; | 1515 | return 0; |
1516 | } | 1516 | } |
@@ -1544,9 +1544,9 @@ void gk20a_vm_mapping_batch_finish_locked( | |||
1544 | void gk20a_vm_mapping_batch_finish(struct vm_gk20a *vm, | 1544 | void gk20a_vm_mapping_batch_finish(struct vm_gk20a *vm, |
1545 | struct vm_gk20a_mapping_batch *mapping_batch) | 1545 | struct vm_gk20a_mapping_batch *mapping_batch) |
1546 | { | 1546 | { |
1547 | mutex_lock(&vm->update_gmmu_lock); | 1547 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); |
1548 | gk20a_vm_mapping_batch_finish_locked(vm, mapping_batch); | 1548 | gk20a_vm_mapping_batch_finish_locked(vm, mapping_batch); |
1549 | mutex_unlock(&vm->update_gmmu_lock); | 1549 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
1550 | } | 1550 | } |
1551 | 1551 | ||
1552 | void gk20a_vm_put_buffers(struct vm_gk20a *vm, | 1552 | void gk20a_vm_put_buffers(struct vm_gk20a *vm, |
@@ -1559,7 +1559,7 @@ void gk20a_vm_put_buffers(struct vm_gk20a *vm, | |||
1559 | if (num_buffers == 0) | 1559 | if (num_buffers == 0) |
1560 | return; | 1560 | return; |
1561 | 1561 | ||
1562 | mutex_lock(&vm->update_gmmu_lock); | 1562 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); |
1563 | gk20a_vm_mapping_batch_start(&batch); | 1563 | gk20a_vm_mapping_batch_start(&batch); |
1564 | vm->kref_put_batch = &batch; | 1564 | vm->kref_put_batch = &batch; |
1565 | 1565 | ||
@@ -1569,7 +1569,7 @@ void gk20a_vm_put_buffers(struct vm_gk20a *vm, | |||
1569 | 1569 | ||
1570 | vm->kref_put_batch = NULL; | 1570 | vm->kref_put_batch = NULL; |
1571 | gk20a_vm_mapping_batch_finish_locked(vm, &batch); | 1571 | gk20a_vm_mapping_batch_finish_locked(vm, &batch); |
1572 | mutex_unlock(&vm->update_gmmu_lock); | 1572 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
1573 | 1573 | ||
1574 | nvgpu_kfree(mapped_buffers); | 1574 | nvgpu_kfree(mapped_buffers); |
1575 | } | 1575 | } |
@@ -1581,17 +1581,17 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset, | |||
1581 | int retries = 10000; /* 50 ms */ | 1581 | int retries = 10000; /* 50 ms */ |
1582 | struct mapped_buffer_node *mapped_buffer; | 1582 | struct mapped_buffer_node *mapped_buffer; |
1583 | 1583 | ||
1584 | mutex_lock(&vm->update_gmmu_lock); | 1584 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); |
1585 | 1585 | ||
1586 | mapped_buffer = find_mapped_buffer_locked(&vm->mapped_buffers, offset); | 1586 | mapped_buffer = find_mapped_buffer_locked(&vm->mapped_buffers, offset); |
1587 | if (!mapped_buffer) { | 1587 | if (!mapped_buffer) { |
1588 | mutex_unlock(&vm->update_gmmu_lock); | 1588 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
1589 | gk20a_err(d, "invalid addr to unmap 0x%llx", offset); | 1589 | gk20a_err(d, "invalid addr to unmap 0x%llx", offset); |
1590 | return; | 1590 | return; |
1591 | } | 1591 | } |
1592 | 1592 | ||
1593 | if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) { | 1593 | if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) { |
1594 | mutex_unlock(&vm->update_gmmu_lock); | 1594 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
1595 | 1595 | ||
1596 | while (retries >= 0 || !tegra_platform_is_silicon()) { | 1596 | while (retries >= 0 || !tegra_platform_is_silicon()) { |
1597 | if (atomic_read(&mapped_buffer->ref.refcount) == 1) | 1597 | if (atomic_read(&mapped_buffer->ref.refcount) == 1) |
@@ -1602,11 +1602,11 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset, | |||
1602 | if (retries < 0 && tegra_platform_is_silicon()) | 1602 | if (retries < 0 && tegra_platform_is_silicon()) |
1603 | gk20a_err(d, "sync-unmap failed on 0x%llx", | 1603 | gk20a_err(d, "sync-unmap failed on 0x%llx", |
1604 | offset); | 1604 | offset); |
1605 | mutex_lock(&vm->update_gmmu_lock); | 1605 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); |
1606 | } | 1606 | } |
1607 | 1607 | ||
1608 | if (mapped_buffer->user_mapped == 0) { | 1608 | if (mapped_buffer->user_mapped == 0) { |
1609 | mutex_unlock(&vm->update_gmmu_lock); | 1609 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
1610 | gk20a_err(d, "addr already unmapped from user 0x%llx", offset); | 1610 | gk20a_err(d, "addr already unmapped from user 0x%llx", offset); |
1611 | return; | 1611 | return; |
1612 | } | 1612 | } |
@@ -1619,7 +1619,7 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset, | |||
1619 | kref_put(&mapped_buffer->ref, gk20a_vm_unmap_locked_kref); | 1619 | kref_put(&mapped_buffer->ref, gk20a_vm_unmap_locked_kref); |
1620 | vm->kref_put_batch = NULL; | 1620 | vm->kref_put_batch = NULL; |
1621 | 1621 | ||
1622 | mutex_unlock(&vm->update_gmmu_lock); | 1622 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
1623 | } | 1623 | } |
1624 | 1624 | ||
1625 | u64 gk20a_vm_alloc_va(struct vm_gk20a *vm, | 1625 | u64 gk20a_vm_alloc_va(struct vm_gk20a *vm, |
@@ -2239,7 +2239,7 @@ int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes) | |||
2239 | buf->g = g; | 2239 | buf->g = g; |
2240 | 2240 | ||
2241 | if (!g->mm.vidmem.cleared) { | 2241 | if (!g->mm.vidmem.cleared) { |
2242 | mutex_lock(&g->mm.vidmem.first_clear_mutex); | 2242 | nvgpu_mutex_acquire(&g->mm.vidmem.first_clear_mutex); |
2243 | if (!g->mm.vidmem.cleared) { | 2243 | if (!g->mm.vidmem.cleared) { |
2244 | err = gk20a_vidmem_clear_all(g); | 2244 | err = gk20a_vidmem_clear_all(g); |
2245 | if (err) { | 2245 | if (err) { |
@@ -2248,7 +2248,7 @@ int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes) | |||
2248 | goto err_kfree; | 2248 | goto err_kfree; |
2249 | } | 2249 | } |
2250 | } | 2250 | } |
2251 | mutex_unlock(&g->mm.vidmem.first_clear_mutex); | 2251 | nvgpu_mutex_release(&g->mm.vidmem.first_clear_mutex); |
2252 | } | 2252 | } |
2253 | 2253 | ||
2254 | buf->mem = kzalloc(sizeof(struct mem_desc), GFP_KERNEL); | 2254 | buf->mem = kzalloc(sizeof(struct mem_desc), GFP_KERNEL); |
@@ -2301,10 +2301,10 @@ int gk20a_vidmem_get_space(struct gk20a *g, u64 *space) | |||
2301 | if (!nvgpu_alloc_initialized(allocator)) | 2301 | if (!nvgpu_alloc_initialized(allocator)) |
2302 | return -ENOSYS; | 2302 | return -ENOSYS; |
2303 | 2303 | ||
2304 | mutex_lock(&g->mm.vidmem.clear_list_mutex); | 2304 | nvgpu_mutex_acquire(&g->mm.vidmem.clear_list_mutex); |
2305 | *space = nvgpu_alloc_space(allocator) + | 2305 | *space = nvgpu_alloc_space(allocator) + |
2306 | atomic64_read(&g->mm.vidmem.bytes_pending); | 2306 | atomic64_read(&g->mm.vidmem.bytes_pending); |
2307 | mutex_unlock(&g->mm.vidmem.clear_list_mutex); | 2307 | nvgpu_mutex_release(&g->mm.vidmem.clear_list_mutex); |
2308 | return 0; | 2308 | return 0; |
2309 | #else | 2309 | #else |
2310 | return -ENOSYS; | 2310 | return -ENOSYS; |
@@ -2425,7 +2425,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm, | |||
2425 | return -EFAULT; | 2425 | return -EFAULT; |
2426 | } | 2426 | } |
2427 | 2427 | ||
2428 | mutex_lock(&vm->update_gmmu_lock); | 2428 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); |
2429 | 2429 | ||
2430 | /* check if this buffer is already mapped */ | 2430 | /* check if this buffer is already mapped */ |
2431 | if (!vm->userspace_managed) { | 2431 | if (!vm->userspace_managed) { |
@@ -2434,7 +2434,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm, | |||
2434 | flags, kind, sgt, | 2434 | flags, kind, sgt, |
2435 | user_mapped, rw_flag); | 2435 | user_mapped, rw_flag); |
2436 | if (map_offset) { | 2436 | if (map_offset) { |
2437 | mutex_unlock(&vm->update_gmmu_lock); | 2437 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
2438 | return map_offset; | 2438 | return map_offset; |
2439 | } | 2439 | } |
2440 | } | 2440 | } |
@@ -2627,7 +2627,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm, | |||
2627 | mapped_buffer->va_node = va_node; | 2627 | mapped_buffer->va_node = va_node; |
2628 | } | 2628 | } |
2629 | 2629 | ||
2630 | mutex_unlock(&vm->update_gmmu_lock); | 2630 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
2631 | 2631 | ||
2632 | return map_offset; | 2632 | return map_offset; |
2633 | 2633 | ||
@@ -2643,7 +2643,7 @@ clean_up: | |||
2643 | if (!IS_ERR(bfr.sgt)) | 2643 | if (!IS_ERR(bfr.sgt)) |
2644 | gk20a_mm_unpin(d, dmabuf, bfr.sgt); | 2644 | gk20a_mm_unpin(d, dmabuf, bfr.sgt); |
2645 | 2645 | ||
2646 | mutex_unlock(&vm->update_gmmu_lock); | 2646 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
2647 | gk20a_dbg_info("err=%d\n", err); | 2647 | gk20a_dbg_info("err=%d\n", err); |
2648 | return 0; | 2648 | return 0; |
2649 | } | 2649 | } |
@@ -2658,13 +2658,13 @@ int gk20a_vm_get_compbits_info(struct vm_gk20a *vm, | |||
2658 | struct mapped_buffer_node *mapped_buffer; | 2658 | struct mapped_buffer_node *mapped_buffer; |
2659 | struct device *d = dev_from_vm(vm); | 2659 | struct device *d = dev_from_vm(vm); |
2660 | 2660 | ||
2661 | mutex_lock(&vm->update_gmmu_lock); | 2661 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); |
2662 | 2662 | ||
2663 | mapped_buffer = find_mapped_buffer_locked(&vm->mapped_buffers, mapping_gva); | 2663 | mapped_buffer = find_mapped_buffer_locked(&vm->mapped_buffers, mapping_gva); |
2664 | 2664 | ||
2665 | if (!mapped_buffer || !mapped_buffer->user_mapped) | 2665 | if (!mapped_buffer || !mapped_buffer->user_mapped) |
2666 | { | 2666 | { |
2667 | mutex_unlock(&vm->update_gmmu_lock); | 2667 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
2668 | gk20a_err(d, "%s: bad offset 0x%llx", __func__, mapping_gva); | 2668 | gk20a_err(d, "%s: bad offset 0x%llx", __func__, mapping_gva); |
2669 | return -EFAULT; | 2669 | return -EFAULT; |
2670 | } | 2670 | } |
@@ -2685,7 +2685,7 @@ int gk20a_vm_get_compbits_info(struct vm_gk20a *vm, | |||
2685 | *mapping_ctagline = mapped_buffer->ctag_offset; | 2685 | *mapping_ctagline = mapped_buffer->ctag_offset; |
2686 | } | 2686 | } |
2687 | 2687 | ||
2688 | mutex_unlock(&vm->update_gmmu_lock); | 2688 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
2689 | return 0; | 2689 | return 0; |
2690 | } | 2690 | } |
2691 | 2691 | ||
@@ -2716,19 +2716,19 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm, | |||
2716 | return -EFAULT; | 2716 | return -EFAULT; |
2717 | } | 2717 | } |
2718 | 2718 | ||
2719 | mutex_lock(&vm->update_gmmu_lock); | 2719 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); |
2720 | 2720 | ||
2721 | mapped_buffer = | 2721 | mapped_buffer = |
2722 | find_mapped_buffer_locked(&vm->mapped_buffers, mapping_gva); | 2722 | find_mapped_buffer_locked(&vm->mapped_buffers, mapping_gva); |
2723 | 2723 | ||
2724 | if (!mapped_buffer || !mapped_buffer->user_mapped) { | 2724 | if (!mapped_buffer || !mapped_buffer->user_mapped) { |
2725 | mutex_unlock(&vm->update_gmmu_lock); | 2725 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
2726 | gk20a_err(d, "%s: bad offset 0x%llx", __func__, mapping_gva); | 2726 | gk20a_err(d, "%s: bad offset 0x%llx", __func__, mapping_gva); |
2727 | return -EFAULT; | 2727 | return -EFAULT; |
2728 | } | 2728 | } |
2729 | 2729 | ||
2730 | if (!mapped_buffer->ctags_mappable) { | 2730 | if (!mapped_buffer->ctags_mappable) { |
2731 | mutex_unlock(&vm->update_gmmu_lock); | 2731 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
2732 | gk20a_err(d, "%s: comptags not mappable, offset 0x%llx", | 2732 | gk20a_err(d, "%s: comptags not mappable, offset 0x%llx", |
2733 | __func__, mapping_gva); | 2733 | __func__, mapping_gva); |
2734 | return -EFAULT; | 2734 | return -EFAULT; |
@@ -2747,7 +2747,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm, | |||
2747 | u64 cacheline_offset_start; | 2747 | u64 cacheline_offset_start; |
2748 | 2748 | ||
2749 | if (!mapped_buffer->ctag_map_win_size) { | 2749 | if (!mapped_buffer->ctag_map_win_size) { |
2750 | mutex_unlock(&vm->update_gmmu_lock); | 2750 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
2751 | gk20a_err(d, | 2751 | gk20a_err(d, |
2752 | "%s: mapping 0x%llx does not have " | 2752 | "%s: mapping 0x%llx does not have " |
2753 | "mappable comptags", | 2753 | "mappable comptags", |
@@ -2774,7 +2774,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm, | |||
2774 | mapped_buffer->ctag_map_win_size, &va_node); | 2774 | mapped_buffer->ctag_map_win_size, &va_node); |
2775 | 2775 | ||
2776 | if (err) { | 2776 | if (err) { |
2777 | mutex_unlock(&vm->update_gmmu_lock); | 2777 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
2778 | return err; | 2778 | return err; |
2779 | } | 2779 | } |
2780 | 2780 | ||
@@ -2783,7 +2783,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm, | |||
2783 | * pointer if the space is freed | 2783 | * pointer if the space is freed |
2784 | * before the buffer is | 2784 | * before the buffer is |
2785 | * unmapped */ | 2785 | * unmapped */ |
2786 | mutex_unlock(&vm->update_gmmu_lock); | 2786 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
2787 | gk20a_err(d, | 2787 | gk20a_err(d, |
2788 | "%s: comptags cannot be mapped into allocated space", | 2788 | "%s: comptags cannot be mapped into allocated space", |
2789 | __func__); | 2789 | __func__); |
@@ -2810,7 +2810,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm, | |||
2810 | g->gr.compbit_store.mem.aperture); | 2810 | g->gr.compbit_store.mem.aperture); |
2811 | 2811 | ||
2812 | if (!mapped_buffer->ctag_map_win_addr) { | 2812 | if (!mapped_buffer->ctag_map_win_addr) { |
2813 | mutex_unlock(&vm->update_gmmu_lock); | 2813 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
2814 | gk20a_err(d, | 2814 | gk20a_err(d, |
2815 | "%s: failed to map comptags for mapping 0x%llx", | 2815 | "%s: failed to map comptags for mapping 0x%llx", |
2816 | __func__, mapping_gva); | 2816 | __func__, mapping_gva); |
@@ -2818,7 +2818,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm, | |||
2818 | } | 2818 | } |
2819 | } else if (fixed_mapping && *compbits_win_gva && | 2819 | } else if (fixed_mapping && *compbits_win_gva && |
2820 | mapped_buffer->ctag_map_win_addr != *compbits_win_gva) { | 2820 | mapped_buffer->ctag_map_win_addr != *compbits_win_gva) { |
2821 | mutex_unlock(&vm->update_gmmu_lock); | 2821 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
2822 | gk20a_err(d, | 2822 | gk20a_err(d, |
2823 | "%s: re-requesting comptags map into mismatching address. buffer offset 0x" | 2823 | "%s: re-requesting comptags map into mismatching address. buffer offset 0x" |
2824 | "%llx, existing comptag map at 0x%llx, requested remap 0x%llx", | 2824 | "%llx, existing comptag map at 0x%llx, requested remap 0x%llx", |
@@ -2830,7 +2830,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm, | |||
2830 | *mapping_iova = gk20a_mm_iova_addr(g, mapped_buffer->sgt->sgl, 0); | 2830 | *mapping_iova = gk20a_mm_iova_addr(g, mapped_buffer->sgt->sgl, 0); |
2831 | *compbits_win_gva = mapped_buffer->ctag_map_win_addr; | 2831 | *compbits_win_gva = mapped_buffer->ctag_map_win_addr; |
2832 | 2832 | ||
2833 | mutex_unlock(&vm->update_gmmu_lock); | 2833 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
2834 | 2834 | ||
2835 | return 0; | 2835 | return 0; |
2836 | } | 2836 | } |
@@ -2852,7 +2852,7 @@ static u64 __gk20a_gmmu_map(struct vm_gk20a *vm, | |||
2852 | struct gk20a *g = gk20a_from_vm(vm); | 2852 | struct gk20a *g = gk20a_from_vm(vm); |
2853 | u64 vaddr; | 2853 | u64 vaddr; |
2854 | 2854 | ||
2855 | mutex_lock(&vm->update_gmmu_lock); | 2855 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); |
2856 | vaddr = g->ops.mm.gmmu_map(vm, addr, | 2856 | vaddr = g->ops.mm.gmmu_map(vm, addr, |
2857 | *sgt, /* sg table */ | 2857 | *sgt, /* sg table */ |
2858 | 0, /* sg offset */ | 2858 | 0, /* sg offset */ |
@@ -2866,7 +2866,7 @@ static u64 __gk20a_gmmu_map(struct vm_gk20a *vm, | |||
2866 | priv, /* priv */ | 2866 | priv, /* priv */ |
2867 | NULL, /* mapping_batch handle */ | 2867 | NULL, /* mapping_batch handle */ |
2868 | aperture); | 2868 | aperture); |
2869 | mutex_unlock(&vm->update_gmmu_lock); | 2869 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
2870 | if (!vaddr) { | 2870 | if (!vaddr) { |
2871 | gk20a_err(dev_from_vm(vm), "failed to allocate va space"); | 2871 | gk20a_err(dev_from_vm(vm), "failed to allocate va space"); |
2872 | return 0; | 2872 | return 0; |
@@ -3128,10 +3128,10 @@ int gk20a_gmmu_alloc_attr_vid_at(struct gk20a *g, enum dma_attr attr, | |||
3128 | * are not done anyway */ | 3128 | * are not done anyway */ |
3129 | WARN_ON(attr != 0 && attr != DMA_ATTR_NO_KERNEL_MAPPING); | 3129 | WARN_ON(attr != 0 && attr != DMA_ATTR_NO_KERNEL_MAPPING); |
3130 | 3130 | ||
3131 | mutex_lock(&g->mm.vidmem.clear_list_mutex); | 3131 | nvgpu_mutex_acquire(&g->mm.vidmem.clear_list_mutex); |
3132 | before_pending = atomic64_read(&g->mm.vidmem.bytes_pending); | 3132 | before_pending = atomic64_read(&g->mm.vidmem.bytes_pending); |
3133 | addr = __gk20a_gmmu_alloc(vidmem_alloc, at, size); | 3133 | addr = __gk20a_gmmu_alloc(vidmem_alloc, at, size); |
3134 | mutex_unlock(&g->mm.vidmem.clear_list_mutex); | 3134 | nvgpu_mutex_release(&g->mm.vidmem.clear_list_mutex); |
3135 | if (!addr) { | 3135 | if (!addr) { |
3136 | /* | 3136 | /* |
3137 | * If memory is known to be freed soon, let the user know that | 3137 | * If memory is known to be freed soon, let the user know that |
@@ -3188,12 +3188,12 @@ static void gk20a_gmmu_free_attr_vid(struct gk20a *g, enum dma_attr attr, | |||
3188 | bool was_empty; | 3188 | bool was_empty; |
3189 | 3189 | ||
3190 | if (mem->user_mem) { | 3190 | if (mem->user_mem) { |
3191 | mutex_lock(&g->mm.vidmem.clear_list_mutex); | 3191 | nvgpu_mutex_acquire(&g->mm.vidmem.clear_list_mutex); |
3192 | was_empty = list_empty(&g->mm.vidmem.clear_list_head); | 3192 | was_empty = list_empty(&g->mm.vidmem.clear_list_head); |
3193 | list_add_tail(&mem->clear_list_entry, | 3193 | list_add_tail(&mem->clear_list_entry, |
3194 | &g->mm.vidmem.clear_list_head); | 3194 | &g->mm.vidmem.clear_list_head); |
3195 | atomic64_add(mem->size, &g->mm.vidmem.bytes_pending); | 3195 | atomic64_add(mem->size, &g->mm.vidmem.bytes_pending); |
3196 | mutex_unlock(&g->mm.vidmem.clear_list_mutex); | 3196 | nvgpu_mutex_release(&g->mm.vidmem.clear_list_mutex); |
3197 | 3197 | ||
3198 | if (was_empty) { | 3198 | if (was_empty) { |
3199 | cancel_work_sync(&g->mm.vidmem.clear_mem_worker); | 3199 | cancel_work_sync(&g->mm.vidmem.clear_mem_worker); |
@@ -3258,12 +3258,12 @@ static struct mem_desc *get_pending_mem_desc(struct mm_gk20a *mm) | |||
3258 | { | 3258 | { |
3259 | struct mem_desc *mem = NULL; | 3259 | struct mem_desc *mem = NULL; |
3260 | 3260 | ||
3261 | mutex_lock(&mm->vidmem.clear_list_mutex); | 3261 | nvgpu_mutex_acquire(&mm->vidmem.clear_list_mutex); |
3262 | mem = list_first_entry_or_null(&mm->vidmem.clear_list_head, | 3262 | mem = list_first_entry_or_null(&mm->vidmem.clear_list_head, |
3263 | struct mem_desc, clear_list_entry); | 3263 | struct mem_desc, clear_list_entry); |
3264 | if (mem) | 3264 | if (mem) |
3265 | list_del_init(&mem->clear_list_entry); | 3265 | list_del_init(&mem->clear_list_entry); |
3266 | mutex_unlock(&mm->vidmem.clear_list_mutex); | 3266 | nvgpu_mutex_release(&mm->vidmem.clear_list_mutex); |
3267 | 3267 | ||
3268 | return mem; | 3268 | return mem; |
3269 | } | 3269 | } |
@@ -3409,12 +3409,12 @@ dma_addr_t gk20a_mm_gpuva_to_iova_base(struct vm_gk20a *vm, u64 gpu_vaddr) | |||
3409 | dma_addr_t addr = 0; | 3409 | dma_addr_t addr = 0; |
3410 | struct gk20a *g = gk20a_from_vm(vm); | 3410 | struct gk20a *g = gk20a_from_vm(vm); |
3411 | 3411 | ||
3412 | mutex_lock(&vm->update_gmmu_lock); | 3412 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); |
3413 | buffer = find_mapped_buffer_locked(&vm->mapped_buffers, gpu_vaddr); | 3413 | buffer = find_mapped_buffer_locked(&vm->mapped_buffers, gpu_vaddr); |
3414 | if (buffer) | 3414 | if (buffer) |
3415 | addr = g->ops.mm.get_iova_addr(g, buffer->sgt->sgl, | 3415 | addr = g->ops.mm.get_iova_addr(g, buffer->sgt->sgl, |
3416 | buffer->flags); | 3416 | buffer->flags); |
3417 | mutex_unlock(&vm->update_gmmu_lock); | 3417 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
3418 | 3418 | ||
3419 | return addr; | 3419 | return addr; |
3420 | } | 3420 | } |
@@ -3426,7 +3426,7 @@ void gk20a_gmmu_unmap(struct vm_gk20a *vm, | |||
3426 | { | 3426 | { |
3427 | struct gk20a *g = gk20a_from_vm(vm); | 3427 | struct gk20a *g = gk20a_from_vm(vm); |
3428 | 3428 | ||
3429 | mutex_lock(&vm->update_gmmu_lock); | 3429 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); |
3430 | g->ops.mm.gmmu_unmap(vm, | 3430 | g->ops.mm.gmmu_unmap(vm, |
3431 | vaddr, | 3431 | vaddr, |
3432 | size, | 3432 | size, |
@@ -3435,7 +3435,7 @@ void gk20a_gmmu_unmap(struct vm_gk20a *vm, | |||
3435 | rw_flag, | 3435 | rw_flag, |
3436 | false, | 3436 | false, |
3437 | NULL); | 3437 | NULL); |
3438 | mutex_unlock(&vm->update_gmmu_lock); | 3438 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
3439 | } | 3439 | } |
3440 | 3440 | ||
3441 | phys_addr_t gk20a_get_phys_from_iova(struct device *d, | 3441 | phys_addr_t gk20a_get_phys_from_iova(struct device *d, |
@@ -4053,16 +4053,16 @@ void gk20a_vm_unmap(struct vm_gk20a *vm, u64 offset) | |||
4053 | struct device *d = dev_from_vm(vm); | 4053 | struct device *d = dev_from_vm(vm); |
4054 | struct mapped_buffer_node *mapped_buffer; | 4054 | struct mapped_buffer_node *mapped_buffer; |
4055 | 4055 | ||
4056 | mutex_lock(&vm->update_gmmu_lock); | 4056 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); |
4057 | mapped_buffer = find_mapped_buffer_locked(&vm->mapped_buffers, offset); | 4057 | mapped_buffer = find_mapped_buffer_locked(&vm->mapped_buffers, offset); |
4058 | if (!mapped_buffer) { | 4058 | if (!mapped_buffer) { |
4059 | mutex_unlock(&vm->update_gmmu_lock); | 4059 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
4060 | gk20a_err(d, "invalid addr to unmap 0x%llx", offset); | 4060 | gk20a_err(d, "invalid addr to unmap 0x%llx", offset); |
4061 | return; | 4061 | return; |
4062 | } | 4062 | } |
4063 | 4063 | ||
4064 | kref_put(&mapped_buffer->ref, gk20a_vm_unmap_locked_kref); | 4064 | kref_put(&mapped_buffer->ref, gk20a_vm_unmap_locked_kref); |
4065 | mutex_unlock(&vm->update_gmmu_lock); | 4065 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
4066 | } | 4066 | } |
4067 | 4067 | ||
4068 | static void gk20a_vm_free_entries(struct vm_gk20a *vm, | 4068 | static void gk20a_vm_free_entries(struct vm_gk20a *vm, |
@@ -4101,7 +4101,7 @@ static void gk20a_vm_remove_support_nofree(struct vm_gk20a *vm) | |||
4101 | } | 4101 | } |
4102 | } | 4102 | } |
4103 | 4103 | ||
4104 | mutex_lock(&vm->update_gmmu_lock); | 4104 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); |
4105 | 4105 | ||
4106 | /* TBD: add a flag here for the unmap code to recognize teardown | 4106 | /* TBD: add a flag here for the unmap code to recognize teardown |
4107 | * and short-circuit any otherwise expensive operations. */ | 4107 | * and short-circuit any otherwise expensive operations. */ |
@@ -4123,7 +4123,7 @@ static void gk20a_vm_remove_support_nofree(struct vm_gk20a *vm) | |||
4123 | 4123 | ||
4124 | gk20a_deinit_vm(vm); | 4124 | gk20a_deinit_vm(vm); |
4125 | 4125 | ||
4126 | mutex_unlock(&vm->update_gmmu_lock); | 4126 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
4127 | } | 4127 | } |
4128 | 4128 | ||
4129 | void gk20a_vm_remove_support(struct vm_gk20a *vm) | 4129 | void gk20a_vm_remove_support(struct vm_gk20a *vm) |
@@ -4547,7 +4547,7 @@ int gk20a_init_vm(struct mm_gk20a *mm, | |||
4547 | 4547 | ||
4548 | vm->mapped_buffers = RB_ROOT; | 4548 | vm->mapped_buffers = RB_ROOT; |
4549 | 4549 | ||
4550 | mutex_init(&vm->update_gmmu_lock); | 4550 | nvgpu_mutex_init(&vm->update_gmmu_lock); |
4551 | kref_init(&vm->ref); | 4551 | kref_init(&vm->ref); |
4552 | INIT_LIST_HEAD(&vm->reserved_va_list); | 4552 | INIT_LIST_HEAD(&vm->reserved_va_list); |
4553 | 4553 | ||
@@ -4696,7 +4696,7 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share, | |||
4696 | INIT_LIST_HEAD(&va_node->va_buffers_list); | 4696 | INIT_LIST_HEAD(&va_node->va_buffers_list); |
4697 | INIT_LIST_HEAD(&va_node->reserved_va_list); | 4697 | INIT_LIST_HEAD(&va_node->reserved_va_list); |
4698 | 4698 | ||
4699 | mutex_lock(&vm->update_gmmu_lock); | 4699 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); |
4700 | 4700 | ||
4701 | /* mark that we need to use sparse mappings here */ | 4701 | /* mark that we need to use sparse mappings here */ |
4702 | if (args->flags & NVGPU_AS_ALLOC_SPACE_FLAGS_SPARSE) { | 4702 | if (args->flags & NVGPU_AS_ALLOC_SPACE_FLAGS_SPARSE) { |
@@ -4715,7 +4715,7 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share, | |||
4715 | NULL, | 4715 | NULL, |
4716 | APERTURE_INVALID); | 4716 | APERTURE_INVALID); |
4717 | if (!map_offset) { | 4717 | if (!map_offset) { |
4718 | mutex_unlock(&vm->update_gmmu_lock); | 4718 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
4719 | nvgpu_free(vma, vaddr_start); | 4719 | nvgpu_free(vma, vaddr_start); |
4720 | kfree(va_node); | 4720 | kfree(va_node); |
4721 | goto clean_up; | 4721 | goto clean_up; |
@@ -4725,7 +4725,7 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share, | |||
4725 | } | 4725 | } |
4726 | list_add_tail(&va_node->reserved_va_list, &vm->reserved_va_list); | 4726 | list_add_tail(&va_node->reserved_va_list, &vm->reserved_va_list); |
4727 | 4727 | ||
4728 | mutex_unlock(&vm->update_gmmu_lock); | 4728 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
4729 | 4729 | ||
4730 | args->o_a.offset = vaddr_start; | 4730 | args->o_a.offset = vaddr_start; |
4731 | err = 0; | 4731 | err = 0; |
@@ -4754,7 +4754,7 @@ int gk20a_vm_free_space(struct gk20a_as_share *as_share, | |||
4754 | vma = vm->vma[pgsz_idx]; | 4754 | vma = vm->vma[pgsz_idx]; |
4755 | nvgpu_free(vma, args->offset); | 4755 | nvgpu_free(vma, args->offset); |
4756 | 4756 | ||
4757 | mutex_lock(&vm->update_gmmu_lock); | 4757 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); |
4758 | va_node = addr_to_reservation(vm, args->offset); | 4758 | va_node = addr_to_reservation(vm, args->offset); |
4759 | if (va_node) { | 4759 | if (va_node) { |
4760 | struct mapped_buffer_node *buffer, *n; | 4760 | struct mapped_buffer_node *buffer, *n; |
@@ -4782,7 +4782,7 @@ int gk20a_vm_free_space(struct gk20a_as_share *as_share, | |||
4782 | NULL); | 4782 | NULL); |
4783 | kfree(va_node); | 4783 | kfree(va_node); |
4784 | } | 4784 | } |
4785 | mutex_unlock(&vm->update_gmmu_lock); | 4785 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
4786 | err = 0; | 4786 | err = 0; |
4787 | 4787 | ||
4788 | return err; | 4788 | return err; |
@@ -4819,7 +4819,7 @@ int gk20a_dmabuf_alloc_drvdata(struct dma_buf *dmabuf, struct device *dev) | |||
4819 | if (likely(priv)) | 4819 | if (likely(priv)) |
4820 | return 0; | 4820 | return 0; |
4821 | 4821 | ||
4822 | mutex_lock(&priv_lock); | 4822 | nvgpu_mutex_acquire(&priv_lock); |
4823 | priv = dma_buf_get_drvdata(dmabuf, dev); | 4823 | priv = dma_buf_get_drvdata(dmabuf, dev); |
4824 | if (priv) | 4824 | if (priv) |
4825 | goto priv_exist_or_err; | 4825 | goto priv_exist_or_err; |
@@ -4828,12 +4828,12 @@ int gk20a_dmabuf_alloc_drvdata(struct dma_buf *dmabuf, struct device *dev) | |||
4828 | priv = ERR_PTR(-ENOMEM); | 4828 | priv = ERR_PTR(-ENOMEM); |
4829 | goto priv_exist_or_err; | 4829 | goto priv_exist_or_err; |
4830 | } | 4830 | } |
4831 | mutex_init(&priv->lock); | 4831 | nvgpu_mutex_init(&priv->lock); |
4832 | INIT_LIST_HEAD(&priv->states); | 4832 | INIT_LIST_HEAD(&priv->states); |
4833 | priv->buffer_id = ++priv_count; | 4833 | priv->buffer_id = ++priv_count; |
4834 | dma_buf_set_drvdata(dmabuf, dev, priv, gk20a_mm_delete_priv); | 4834 | dma_buf_set_drvdata(dmabuf, dev, priv, gk20a_mm_delete_priv); |
4835 | priv_exist_or_err: | 4835 | priv_exist_or_err: |
4836 | mutex_unlock(&priv_lock); | 4836 | nvgpu_mutex_release(&priv_lock); |
4837 | if (IS_ERR(priv)) | 4837 | if (IS_ERR(priv)) |
4838 | return -ENOMEM; | 4838 | return -ENOMEM; |
4839 | 4839 | ||
@@ -4858,7 +4858,7 @@ int gk20a_dmabuf_get_state(struct dma_buf *dmabuf, struct device *dev, | |||
4858 | if (WARN_ON(!priv)) | 4858 | if (WARN_ON(!priv)) |
4859 | return -ENOSYS; | 4859 | return -ENOSYS; |
4860 | 4860 | ||
4861 | mutex_lock(&priv->lock); | 4861 | nvgpu_mutex_acquire(&priv->lock); |
4862 | 4862 | ||
4863 | list_for_each_entry(s, &priv->states, list) | 4863 | list_for_each_entry(s, &priv->states, list) |
4864 | if (s->offset == offset) | 4864 | if (s->offset == offset) |
@@ -4873,11 +4873,11 @@ int gk20a_dmabuf_get_state(struct dma_buf *dmabuf, struct device *dev, | |||
4873 | 4873 | ||
4874 | s->offset = offset; | 4874 | s->offset = offset; |
4875 | INIT_LIST_HEAD(&s->list); | 4875 | INIT_LIST_HEAD(&s->list); |
4876 | mutex_init(&s->lock); | 4876 | nvgpu_mutex_init(&s->lock); |
4877 | list_add_tail(&s->list, &priv->states); | 4877 | list_add_tail(&s->list, &priv->states); |
4878 | 4878 | ||
4879 | out: | 4879 | out: |
4880 | mutex_unlock(&priv->lock); | 4880 | nvgpu_mutex_release(&priv->lock); |
4881 | if (!err) | 4881 | if (!err) |
4882 | *state = s; | 4882 | *state = s; |
4883 | return err; | 4883 | return err; |
@@ -5152,7 +5152,7 @@ int gk20a_mm_fb_flush(struct gk20a *g) | |||
5152 | 5152 | ||
5153 | nvgpu_timeout_init(g, &timeout, 100, NVGPU_TIMER_RETRY_TIMER); | 5153 | nvgpu_timeout_init(g, &timeout, 100, NVGPU_TIMER_RETRY_TIMER); |
5154 | 5154 | ||
5155 | mutex_lock(&mm->l2_op_lock); | 5155 | nvgpu_mutex_acquire(&mm->l2_op_lock); |
5156 | 5156 | ||
5157 | /* Make sure all previous writes are committed to the L2. There's no | 5157 | /* Make sure all previous writes are committed to the L2. There's no |
5158 | guarantee that writes are to DRAM. This will be a sysmembar internal | 5158 | guarantee that writes are to DRAM. This will be a sysmembar internal |
@@ -5184,7 +5184,7 @@ int gk20a_mm_fb_flush(struct gk20a *g) | |||
5184 | 5184 | ||
5185 | trace_gk20a_mm_fb_flush_done(dev_name(g->dev)); | 5185 | trace_gk20a_mm_fb_flush_done(dev_name(g->dev)); |
5186 | 5186 | ||
5187 | mutex_unlock(&mm->l2_op_lock); | 5187 | nvgpu_mutex_release(&mm->l2_op_lock); |
5188 | 5188 | ||
5189 | pm_runtime_put_noidle(g->dev); | 5189 | pm_runtime_put_noidle(g->dev); |
5190 | 5190 | ||
@@ -5231,9 +5231,9 @@ void gk20a_mm_l2_invalidate(struct gk20a *g) | |||
5231 | struct mm_gk20a *mm = &g->mm; | 5231 | struct mm_gk20a *mm = &g->mm; |
5232 | gk20a_busy_noresume(g->dev); | 5232 | gk20a_busy_noresume(g->dev); |
5233 | if (g->power_on) { | 5233 | if (g->power_on) { |
5234 | mutex_lock(&mm->l2_op_lock); | 5234 | nvgpu_mutex_acquire(&mm->l2_op_lock); |
5235 | gk20a_mm_l2_invalidate_locked(g); | 5235 | gk20a_mm_l2_invalidate_locked(g); |
5236 | mutex_unlock(&mm->l2_op_lock); | 5236 | nvgpu_mutex_release(&mm->l2_op_lock); |
5237 | } | 5237 | } |
5238 | pm_runtime_put_noidle(g->dev); | 5238 | pm_runtime_put_noidle(g->dev); |
5239 | } | 5239 | } |
@@ -5252,7 +5252,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate) | |||
5252 | 5252 | ||
5253 | nvgpu_timeout_init(g, &timeout, 2000, NVGPU_TIMER_RETRY_TIMER); | 5253 | nvgpu_timeout_init(g, &timeout, 2000, NVGPU_TIMER_RETRY_TIMER); |
5254 | 5254 | ||
5255 | mutex_lock(&mm->l2_op_lock); | 5255 | nvgpu_mutex_acquire(&mm->l2_op_lock); |
5256 | 5256 | ||
5257 | trace_gk20a_mm_l2_flush(dev_name(g->dev)); | 5257 | trace_gk20a_mm_l2_flush(dev_name(g->dev)); |
5258 | 5258 | ||
@@ -5280,7 +5280,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate) | |||
5280 | if (invalidate) | 5280 | if (invalidate) |
5281 | gk20a_mm_l2_invalidate_locked(g); | 5281 | gk20a_mm_l2_invalidate_locked(g); |
5282 | 5282 | ||
5283 | mutex_unlock(&mm->l2_op_lock); | 5283 | nvgpu_mutex_release(&mm->l2_op_lock); |
5284 | 5284 | ||
5285 | hw_was_off: | 5285 | hw_was_off: |
5286 | pm_runtime_put_noidle(g->dev); | 5286 | pm_runtime_put_noidle(g->dev); |
@@ -5300,7 +5300,7 @@ void gk20a_mm_cbc_clean(struct gk20a *g) | |||
5300 | 5300 | ||
5301 | nvgpu_timeout_init(g, &timeout, 200, NVGPU_TIMER_RETRY_TIMER); | 5301 | nvgpu_timeout_init(g, &timeout, 200, NVGPU_TIMER_RETRY_TIMER); |
5302 | 5302 | ||
5303 | mutex_lock(&mm->l2_op_lock); | 5303 | nvgpu_mutex_acquire(&mm->l2_op_lock); |
5304 | 5304 | ||
5305 | /* Flush all dirty lines from the CBC to L2 */ | 5305 | /* Flush all dirty lines from the CBC to L2 */ |
5306 | gk20a_writel(g, flush_l2_clean_comptags_r(), | 5306 | gk20a_writel(g, flush_l2_clean_comptags_r(), |
@@ -5320,7 +5320,7 @@ void gk20a_mm_cbc_clean(struct gk20a *g) | |||
5320 | } while (!nvgpu_timeout_expired_msg(&timeout, | 5320 | } while (!nvgpu_timeout_expired_msg(&timeout, |
5321 | "l2_clean_comptags too many retries")); | 5321 | "l2_clean_comptags too many retries")); |
5322 | 5322 | ||
5323 | mutex_unlock(&mm->l2_op_lock); | 5323 | nvgpu_mutex_release(&mm->l2_op_lock); |
5324 | 5324 | ||
5325 | hw_was_off: | 5325 | hw_was_off: |
5326 | pm_runtime_put_noidle(g->dev); | 5326 | pm_runtime_put_noidle(g->dev); |
@@ -5334,19 +5334,19 @@ int gk20a_vm_find_buffer(struct vm_gk20a *vm, u64 gpu_va, | |||
5334 | 5334 | ||
5335 | gk20a_dbg_fn("gpu_va=0x%llx", gpu_va); | 5335 | gk20a_dbg_fn("gpu_va=0x%llx", gpu_va); |
5336 | 5336 | ||
5337 | mutex_lock(&vm->update_gmmu_lock); | 5337 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); |
5338 | 5338 | ||
5339 | mapped_buffer = find_mapped_buffer_range_locked(&vm->mapped_buffers, | 5339 | mapped_buffer = find_mapped_buffer_range_locked(&vm->mapped_buffers, |
5340 | gpu_va); | 5340 | gpu_va); |
5341 | if (!mapped_buffer) { | 5341 | if (!mapped_buffer) { |
5342 | mutex_unlock(&vm->update_gmmu_lock); | 5342 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
5343 | return -EINVAL; | 5343 | return -EINVAL; |
5344 | } | 5344 | } |
5345 | 5345 | ||
5346 | *dmabuf = mapped_buffer->dmabuf; | 5346 | *dmabuf = mapped_buffer->dmabuf; |
5347 | *offset = gpu_va - mapped_buffer->addr; | 5347 | *offset = gpu_va - mapped_buffer->addr; |
5348 | 5348 | ||
5349 | mutex_unlock(&vm->update_gmmu_lock); | 5349 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
5350 | 5350 | ||
5351 | return 0; | 5351 | return 0; |
5352 | } | 5352 | } |
@@ -5373,7 +5373,7 @@ void gk20a_mm_tlb_invalidate(struct vm_gk20a *vm) | |||
5373 | 5373 | ||
5374 | addr_lo = u64_lo32(gk20a_mem_get_base_addr(g, &vm->pdb.mem, 0) >> 12); | 5374 | addr_lo = u64_lo32(gk20a_mem_get_base_addr(g, &vm->pdb.mem, 0) >> 12); |
5375 | 5375 | ||
5376 | mutex_lock(&tlb_lock); | 5376 | nvgpu_mutex_acquire(&tlb_lock); |
5377 | 5377 | ||
5378 | trace_gk20a_mm_tlb_invalidate(dev_name(g->dev)); | 5378 | trace_gk20a_mm_tlb_invalidate(dev_name(g->dev)); |
5379 | 5379 | ||
@@ -5414,7 +5414,7 @@ void gk20a_mm_tlb_invalidate(struct vm_gk20a *vm) | |||
5414 | trace_gk20a_mm_tlb_invalidate_done(dev_name(g->dev)); | 5414 | trace_gk20a_mm_tlb_invalidate_done(dev_name(g->dev)); |
5415 | 5415 | ||
5416 | out: | 5416 | out: |
5417 | mutex_unlock(&tlb_lock); | 5417 | nvgpu_mutex_release(&tlb_lock); |
5418 | } | 5418 | } |
5419 | 5419 | ||
5420 | int gk20a_mm_suspend(struct gk20a *g) | 5420 | int gk20a_mm_suspend(struct gk20a *g) |
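Every hunk in mm_gk20a.c above is the same mechanical substitution: mutex_lock()/mutex_unlock() become nvgpu_mutex_acquire()/nvgpu_mutex_release() on the same lock object, with lock ordering, early-release error paths, and goto-label unlocks all preserved. On Linux this can be behavior-neutral because the new calls can be thin forwarding inlines; a minimal sketch of what they could look like, assuming they simply forward to the kernel primitives (the real <nvgpu/lock.h> may differ; the wrapped type is sketched after the mm_gk20a.h section below):

        static inline void nvgpu_mutex_acquire(struct nvgpu_mutex *mutex)
        {
                mutex_lock(&mutex->mutex);
        }

        static inline void nvgpu_mutex_release(struct nvgpu_mutex *mutex)
        {
                mutex_unlock(&mutex->mutex);
        }

The point of the indirection is that common nvgpu code stops naming Linux primitives directly, so a non-Linux build can supply a different lock.h without touching any of these call sites.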
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h index 294dc628..5f29c9e7 100644 --- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h | |||
@@ -131,7 +131,7 @@ struct gk20a_buffer_state { | |||
131 | struct list_head list; | 131 | struct list_head list; |
132 | 132 | ||
133 | /* The valid compbits and the fence must be changed atomically. */ | 133 | /* The valid compbits and the fence must be changed atomically. */ |
134 | struct mutex lock; | 134 | struct nvgpu_mutex lock; |
135 | 135 | ||
136 | /* Offset of the surface within the dma-buf whose state is | 136 | /* Offset of the surface within the dma-buf whose state is |
137 | * described by this struct (one dma-buf can contain multiple | 137 | * described by this struct (one dma-buf can contain multiple |
@@ -272,7 +272,7 @@ struct vm_gk20a { | |||
272 | 272 | ||
273 | struct kref ref; | 273 | struct kref ref; |
274 | 274 | ||
275 | struct mutex update_gmmu_lock; | 275 | struct nvgpu_mutex update_gmmu_lock; |
276 | 276 | ||
277 | struct gk20a_mm_entry pdb; | 277 | struct gk20a_mm_entry pdb; |
278 | 278 | ||
@@ -360,7 +360,7 @@ struct mm_gk20a { | |||
360 | struct vm_gk20a vm; | 360 | struct vm_gk20a vm; |
361 | } ce; | 361 | } ce; |
362 | 362 | ||
363 | struct mutex l2_op_lock; | 363 | struct nvgpu_mutex l2_op_lock; |
364 | #ifdef CONFIG_ARCH_TEGRA_18x_SOC | 364 | #ifdef CONFIG_ARCH_TEGRA_18x_SOC |
365 | struct mem_desc bar2_desc; | 365 | struct mem_desc bar2_desc; |
366 | #endif | 366 | #endif |
@@ -395,7 +395,7 @@ struct mm_gk20a { | |||
395 | struct mem_desc sysmem_flush; | 395 | struct mem_desc sysmem_flush; |
396 | 396 | ||
397 | u32 pramin_window; | 397 | u32 pramin_window; |
398 | spinlock_t pramin_window_lock; | 398 | struct nvgpu_spinlock pramin_window_lock; |
399 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0) | 399 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0) |
400 | u32 force_pramin; /* via debugfs */ | 400 | u32 force_pramin; /* via debugfs */ |
401 | #else | 401 | #else |
@@ -413,10 +413,10 @@ struct mm_gk20a { | |||
413 | 413 | ||
414 | u32 ce_ctx_id; | 414 | u32 ce_ctx_id; |
415 | volatile bool cleared; | 415 | volatile bool cleared; |
416 | struct mutex first_clear_mutex; | 416 | struct nvgpu_mutex first_clear_mutex; |
417 | 417 | ||
418 | struct list_head clear_list_head; | 418 | struct list_head clear_list_head; |
419 | struct mutex clear_list_mutex; | 419 | struct nvgpu_mutex clear_list_mutex; |
420 | 420 | ||
421 | struct work_struct clear_mem_worker; | 421 | struct work_struct clear_mem_worker; |
422 | atomic64_t bytes_pending; | 422 | atomic64_t bytes_pending; |
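mm_gk20a.h swaps the field types 1:1: struct mutex becomes struct nvgpu_mutex and spinlock_t becomes struct nvgpu_spinlock, with no other layout changes. A plausible minimal definition of the types, assuming they wrap the Linux primitives by value (a sketch built from the names in these hunks, not from the actual header):

        #include <linux/mutex.h>
        #include <linux/spinlock.h>

        struct nvgpu_mutex {
                struct mutex mutex;
        };

        struct nvgpu_spinlock {
                spinlock_t spinlock;
        };

Because the wrappers add no extra members, the size and alignment of the containing structs (gk20a_buffer_state, vm_gk20a, mm_gk20a) stay effectively unchanged on Linux.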
diff --git a/drivers/gpu/nvgpu/gk20a/platform_gk20a.h b/drivers/gpu/nvgpu/gk20a/platform_gk20a.h index fa0909ee..c841c8e6 100644 --- a/drivers/gpu/nvgpu/gk20a/platform_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/platform_gk20a.h | |||
@@ -20,6 +20,8 @@ | |||
20 | #include <linux/dma-attrs.h> | 20 | #include <linux/dma-attrs.h> |
21 | #include <linux/version.h> | 21 | #include <linux/version.h> |
22 | 22 | ||
23 | #include <nvgpu/lock.h> | ||
24 | |||
23 | #define GK20A_CLKS_MAX 4 | 25 | #define GK20A_CLKS_MAX 4 |
24 | 26 | ||
25 | struct gk20a; | 27 | struct gk20a; |
@@ -184,7 +186,7 @@ struct gk20a_platform { | |||
184 | 186 | ||
185 | /* Called to turn on the device */ | 187 | /* Called to turn on the device */ |
186 | int (*unrailgate)(struct device *dev); | 188 | int (*unrailgate)(struct device *dev); |
187 | struct mutex railgate_lock; | 189 | struct nvgpu_mutex railgate_lock; |
188 | 190 | ||
189 | /* Called to check state of device */ | 191 | /* Called to check state of device */ |
190 | bool (*is_railgated)(struct device *dev); | 192 | bool (*is_railgated)(struct device *dev); |
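platform_gk20a.h embeds struct nvgpu_mutex by value (railgate_lock), so a forward declaration is not enough: the header needs the full type definition, which is what the new #include <nvgpu/lock.h> provides. The same include is added to every header in this change that gains a by-value lock member, e.g. (sketch, hypothetical container name):

        #include <nvgpu/lock.h>

        struct some_state {                     /* hypothetical */
                struct nvgpu_mutex lock;        /* by value => full type needed */
        };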
diff --git a/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c b/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c index 5ba9d25b..225b98e4 100644 --- a/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c +++ b/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c | |||
@@ -357,7 +357,7 @@ static void gm20b_tegra_postscale(struct device *dev, unsigned long freq) | |||
357 | emc_rate = tegra_bwmgr_get_max_emc_rate(); | 357 | emc_rate = tegra_bwmgr_get_max_emc_rate(); |
358 | 358 | ||
359 | emc_params->freq_last_set = emc_rate; | 359 | emc_params->freq_last_set = emc_rate; |
360 | mutex_lock(&platform->railgate_lock); | 360 | nvgpu_mutex_acquire(&platform->railgate_lock); |
361 | if (platform->is_railgated && !platform->is_railgated(dev)) | 361 | if (platform->is_railgated && !platform->is_railgated(dev)) |
362 | goto done; | 362 | goto done; |
363 | 363 | ||
@@ -365,7 +365,7 @@ static void gm20b_tegra_postscale(struct device *dev, unsigned long freq) | |||
365 | TEGRA_BWMGR_SET_EMC_FLOOR); | 365 | TEGRA_BWMGR_SET_EMC_FLOOR); |
366 | 366 | ||
367 | done: | 367 | done: |
368 | mutex_unlock(&platform->railgate_lock); | 368 | nvgpu_mutex_release(&platform->railgate_lock); |
369 | } | 369 | } |
370 | 370 | ||
371 | #endif | 371 | #endif |
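gm20b_tegra_postscale() keeps its single-exit shape through the conversion: one acquire up front, one release at the done: label, with the early-out case jumping straight to the unlock. Condensed sketch of the converted control flow (names from the hunks; the EMC body is elided):

        nvgpu_mutex_acquire(&platform->railgate_lock);
        if (platform->is_railgated && !platform->is_railgated(dev))
                goto done;      /* early out: still released at the single exit */

        /* ... set the EMC floor while holding railgate_lock ... */

        done:
        nvgpu_mutex_release(&platform->railgate_lock);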
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c index 6227d523..4ea9b911 100644 --- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | |||
@@ -1414,11 +1414,11 @@ int gk20a_init_pmu(struct pmu_gk20a *pmu) | |||
1414 | struct gk20a *g = gk20a_from_pmu(pmu); | 1414 | struct gk20a *g = gk20a_from_pmu(pmu); |
1415 | struct pmu_v *pv = &g->ops.pmu_ver; | 1415 | struct pmu_v *pv = &g->ops.pmu_ver; |
1416 | 1416 | ||
1417 | mutex_init(&pmu->elpg_mutex); | 1417 | nvgpu_mutex_init(&pmu->elpg_mutex); |
1418 | mutex_init(&pmu->pg_mutex); | 1418 | nvgpu_mutex_init(&pmu->pg_mutex); |
1419 | mutex_init(&pmu->isr_mutex); | 1419 | nvgpu_mutex_init(&pmu->isr_mutex); |
1420 | mutex_init(&pmu->pmu_copy_lock); | 1420 | nvgpu_mutex_init(&pmu->pmu_copy_lock); |
1421 | mutex_init(&pmu->pmu_seq_lock); | 1421 | nvgpu_mutex_init(&pmu->pmu_seq_lock); |
1422 | 1422 | ||
1423 | pmu->remove_support = gk20a_remove_pmu_support; | 1423 | pmu->remove_support = gk20a_remove_pmu_support; |
1424 | 1424 | ||
@@ -2189,7 +2189,7 @@ void pmu_copy_from_dmem(struct pmu_gk20a *pmu, | |||
2189 | return; | 2189 | return; |
2190 | } | 2190 | } |
2191 | 2191 | ||
2192 | mutex_lock(&pmu->pmu_copy_lock); | 2192 | nvgpu_mutex_acquire(&pmu->pmu_copy_lock); |
2193 | 2193 | ||
2194 | words = size >> 2; | 2194 | words = size >> 2; |
2195 | bytes = size & 0x3; | 2195 | bytes = size & 0x3; |
@@ -2211,7 +2211,7 @@ void pmu_copy_from_dmem(struct pmu_gk20a *pmu, | |||
2211 | dst[(words << 2) + i] = ((u8 *)&data)[i]; | 2211 | dst[(words << 2) + i] = ((u8 *)&data)[i]; |
2212 | } | 2212 | } |
2213 | } | 2213 | } |
2214 | mutex_unlock(&pmu->pmu_copy_lock); | 2214 | nvgpu_mutex_release(&pmu->pmu_copy_lock); |
2215 | return; | 2215 | return; |
2216 | } | 2216 | } |
2217 | 2217 | ||
@@ -2235,7 +2235,7 @@ void pmu_copy_to_dmem(struct pmu_gk20a *pmu, | |||
2235 | return; | 2235 | return; |
2236 | } | 2236 | } |
2237 | 2237 | ||
2238 | mutex_lock(&pmu->pmu_copy_lock); | 2238 | nvgpu_mutex_acquire(&pmu->pmu_copy_lock); |
2239 | 2239 | ||
2240 | words = size >> 2; | 2240 | words = size >> 2; |
2241 | bytes = size & 0x3; | 2241 | bytes = size & 0x3; |
@@ -2265,7 +2265,7 @@ void pmu_copy_to_dmem(struct pmu_gk20a *pmu, | |||
2265 | "copy failed. bytes written %d, expected %d", | 2265 | "copy failed. bytes written %d, expected %d", |
2266 | data - dst, size); | 2266 | data - dst, size); |
2267 | } | 2267 | } |
2268 | mutex_unlock(&pmu->pmu_copy_lock); | 2268 | nvgpu_mutex_release(&pmu->pmu_copy_lock); |
2269 | return; | 2269 | return; |
2270 | } | 2270 | } |
2271 | 2271 | ||
@@ -2571,17 +2571,17 @@ static int pmu_seq_acquire(struct pmu_gk20a *pmu, | |||
2571 | struct pmu_sequence *seq; | 2571 | struct pmu_sequence *seq; |
2572 | u32 index; | 2572 | u32 index; |
2573 | 2573 | ||
2574 | mutex_lock(&pmu->pmu_seq_lock); | 2574 | nvgpu_mutex_acquire(&pmu->pmu_seq_lock); |
2575 | index = find_first_zero_bit(pmu->pmu_seq_tbl, | 2575 | index = find_first_zero_bit(pmu->pmu_seq_tbl, |
2576 | sizeof(pmu->pmu_seq_tbl)); | 2576 | sizeof(pmu->pmu_seq_tbl)); |
2577 | if (index >= sizeof(pmu->pmu_seq_tbl)) { | 2577 | if (index >= sizeof(pmu->pmu_seq_tbl)) { |
2578 | gk20a_err(dev_from_gk20a(g), | 2578 | gk20a_err(dev_from_gk20a(g), |
2579 | "no free sequence available"); | 2579 | "no free sequence available"); |
2580 | mutex_unlock(&pmu->pmu_seq_lock); | 2580 | nvgpu_mutex_release(&pmu->pmu_seq_lock); |
2581 | return -EAGAIN; | 2581 | return -EAGAIN; |
2582 | } | 2582 | } |
2583 | set_bit(index, pmu->pmu_seq_tbl); | 2583 | set_bit(index, pmu->pmu_seq_tbl); |
2584 | mutex_unlock(&pmu->pmu_seq_lock); | 2584 | nvgpu_mutex_release(&pmu->pmu_seq_lock); |
2585 | 2585 | ||
2586 | seq = &pmu->seq[index]; | 2586 | seq = &pmu->seq[index]; |
2587 | seq->state = PMU_SEQ_STATE_PENDING; | 2587 | seq->state = PMU_SEQ_STATE_PENDING; |
@@ -2616,7 +2616,7 @@ static int pmu_queue_init(struct pmu_gk20a *pmu, | |||
2616 | queue->id = id; | 2616 | queue->id = id; |
2617 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params(queue, id, init); | 2617 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params(queue, id, init); |
2618 | queue->mutex_id = id; | 2618 | queue->mutex_id = id; |
2619 | mutex_init(&queue->mutex); | 2619 | nvgpu_mutex_init(&queue->mutex); |
2620 | 2620 | ||
2621 | gk20a_dbg_pmu("queue %d: index %d, offset 0x%08x, size 0x%08x", | 2621 | gk20a_dbg_pmu("queue %d: index %d, offset 0x%08x, size 0x%08x", |
2622 | id, queue->index, queue->offset, queue->size); | 2622 | id, queue->index, queue->offset, queue->size); |
@@ -2831,7 +2831,7 @@ static int pmu_queue_lock(struct pmu_gk20a *pmu, | |||
2831 | return 0; | 2831 | return 0; |
2832 | 2832 | ||
2833 | if (PMU_IS_SW_COMMAND_QUEUE(queue->id)) { | 2833 | if (PMU_IS_SW_COMMAND_QUEUE(queue->id)) { |
2834 | mutex_lock(&queue->mutex); | 2834 | nvgpu_mutex_acquire(&queue->mutex); |
2835 | return 0; | 2835 | return 0; |
2836 | } | 2836 | } |
2837 | 2837 | ||
@@ -2848,7 +2848,7 @@ static int pmu_queue_unlock(struct pmu_gk20a *pmu, | |||
2848 | return 0; | 2848 | return 0; |
2849 | 2849 | ||
2850 | if (PMU_IS_SW_COMMAND_QUEUE(queue->id)) { | 2850 | if (PMU_IS_SW_COMMAND_QUEUE(queue->id)) { |
2851 | mutex_unlock(&queue->mutex); | 2851 | nvgpu_mutex_release(&queue->mutex); |
2852 | return 0; | 2852 | return 0; |
2853 | } | 2853 | } |
2854 | 2854 | ||
@@ -3245,10 +3245,10 @@ static int gk20a_init_pmu_setup_hw1(struct gk20a *g) | |||
3245 | 3245 | ||
3246 | gk20a_dbg_fn(""); | 3246 | gk20a_dbg_fn(""); |
3247 | 3247 | ||
3248 | mutex_lock(&pmu->isr_mutex); | 3248 | nvgpu_mutex_acquire(&pmu->isr_mutex); |
3249 | pmu_reset(pmu); | 3249 | pmu_reset(pmu); |
3250 | pmu->isr_enabled = true; | 3250 | pmu->isr_enabled = true; |
3251 | mutex_unlock(&pmu->isr_mutex); | 3251 | nvgpu_mutex_release(&pmu->isr_mutex); |
3252 | 3252 | ||
3253 | /* setup apertures - virtual */ | 3253 | /* setup apertures - virtual */ |
3254 | gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_UCODE), | 3254 | gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_UCODE), |
@@ -4530,9 +4530,9 @@ void gk20a_pmu_isr(struct gk20a *g) | |||
4530 | 4530 | ||
4531 | gk20a_dbg_fn(""); | 4531 | gk20a_dbg_fn(""); |
4532 | 4532 | ||
4533 | mutex_lock(&pmu->isr_mutex); | 4533 | nvgpu_mutex_acquire(&pmu->isr_mutex); |
4534 | if (!pmu->isr_enabled) { | 4534 | if (!pmu->isr_enabled) { |
4535 | mutex_unlock(&pmu->isr_mutex); | 4535 | nvgpu_mutex_release(&pmu->isr_mutex); |
4536 | return; | 4536 | return; |
4537 | } | 4537 | } |
4538 | 4538 | ||
@@ -4546,7 +4546,7 @@ void gk20a_pmu_isr(struct gk20a *g) | |||
4546 | intr = gk20a_readl(g, pwr_falcon_irqstat_r()) & mask; | 4546 | intr = gk20a_readl(g, pwr_falcon_irqstat_r()) & mask; |
4547 | if (!intr || pmu->pmu_state == PMU_STATE_OFF) { | 4547 | if (!intr || pmu->pmu_state == PMU_STATE_OFF) { |
4548 | gk20a_writel(g, pwr_falcon_irqsclr_r(), intr); | 4548 | gk20a_writel(g, pwr_falcon_irqsclr_r(), intr); |
4549 | mutex_unlock(&pmu->isr_mutex); | 4549 | nvgpu_mutex_release(&pmu->isr_mutex); |
4550 | return; | 4550 | return; |
4551 | } | 4551 | } |
4552 | 4552 | ||
@@ -4583,7 +4583,7 @@ void gk20a_pmu_isr(struct gk20a *g) | |||
4583 | pwr_falcon_irqsset_swgen0_set_f()); | 4583 | pwr_falcon_irqsset_swgen0_set_f()); |
4584 | } | 4584 | } |
4585 | 4585 | ||
4586 | mutex_unlock(&pmu->isr_mutex); | 4586 | nvgpu_mutex_release(&pmu->isr_mutex); |
4587 | } | 4587 | } |
4588 | 4588 | ||
4589 | static bool pmu_validate_cmd(struct pmu_gk20a *pmu, struct pmu_cmd *cmd, | 4589 | static bool pmu_validate_cmd(struct pmu_gk20a *pmu, struct pmu_cmd *cmd, |
@@ -4987,7 +4987,7 @@ int gk20a_pmu_enable_elpg(struct gk20a *g) | |||
4987 | if (!support_gk20a_pmu(g->dev)) | 4987 | if (!support_gk20a_pmu(g->dev)) |
4988 | return ret; | 4988 | return ret; |
4989 | 4989 | ||
4990 | mutex_lock(&pmu->elpg_mutex); | 4990 | nvgpu_mutex_acquire(&pmu->elpg_mutex); |
4991 | 4991 | ||
4992 | pmu->elpg_refcnt++; | 4992 | pmu->elpg_refcnt++; |
4993 | if (pmu->elpg_refcnt <= 0) | 4993 | if (pmu->elpg_refcnt <= 0) |
@@ -5026,7 +5026,7 @@ int gk20a_pmu_enable_elpg(struct gk20a *g) | |||
5026 | } | 5026 | } |
5027 | 5027 | ||
5028 | exit_unlock: | 5028 | exit_unlock: |
5029 | mutex_unlock(&pmu->elpg_mutex); | 5029 | nvgpu_mutex_release(&pmu->elpg_mutex); |
5030 | gk20a_dbg_fn("done"); | 5030 | gk20a_dbg_fn("done"); |
5031 | return ret; | 5031 | return ret; |
5032 | } | 5032 | } |
@@ -5049,7 +5049,7 @@ int gk20a_pmu_disable_elpg(struct gk20a *g) | |||
5049 | if (!support_gk20a_pmu(g->dev)) | 5049 | if (!support_gk20a_pmu(g->dev)) |
5050 | return ret; | 5050 | return ret; |
5051 | 5051 | ||
5052 | mutex_lock(&pmu->elpg_mutex); | 5052 | nvgpu_mutex_acquire(&pmu->elpg_mutex); |
5053 | 5053 | ||
5054 | pmu->elpg_refcnt--; | 5054 | pmu->elpg_refcnt--; |
5055 | if (pmu->elpg_refcnt > 0) { | 5055 | if (pmu->elpg_refcnt > 0) { |
@@ -5138,7 +5138,7 @@ int gk20a_pmu_disable_elpg(struct gk20a *g) | |||
5138 | 5138 | ||
5139 | exit_reschedule: | 5139 | exit_reschedule: |
5140 | exit_unlock: | 5140 | exit_unlock: |
5141 | mutex_unlock(&pmu->elpg_mutex); | 5141 | nvgpu_mutex_release(&pmu->elpg_mutex); |
5142 | gk20a_dbg_fn("done"); | 5142 | gk20a_dbg_fn("done"); |
5143 | return ret; | 5143 | return ret; |
5144 | } | 5144 | } |
@@ -5182,9 +5182,9 @@ int gk20a_pmu_destroy(struct gk20a *g) | |||
5182 | g->pg_ungating_time_us += (u64)pg_stat_data.ungating_time; | 5182 | g->pg_ungating_time_us += (u64)pg_stat_data.ungating_time; |
5183 | g->pg_gating_cnt += pg_stat_data.gating_cnt; | 5183 | g->pg_gating_cnt += pg_stat_data.gating_cnt; |
5184 | 5184 | ||
5185 | mutex_lock(&pmu->isr_mutex); | 5185 | nvgpu_mutex_acquire(&pmu->isr_mutex); |
5186 | pmu->isr_enabled = false; | 5186 | pmu->isr_enabled = false; |
5187 | mutex_unlock(&pmu->isr_mutex); | 5187 | nvgpu_mutex_release(&pmu->isr_mutex); |
5188 | 5188 | ||
5189 | pmu->pmu_state = PMU_STATE_OFF; | 5189 | pmu->pmu_state = PMU_STATE_OFF; |
5190 | pmu->pmu_ready = false; | 5190 | pmu->pmu_ready = false; |
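Note that pmu->isr_mutex is taken inside gk20a_pmu_isr() itself and is still a sleeping lock after the conversion. That is only legal if the PMU interrupt work runs in a sleepable context; a sketch of that assumption (hypothetical handler name, not part of this change):

        /* Assumption: PMU interrupts are serviced from a context where
         * sleeping is allowed (e.g. a threaded handler), so a mutex --
         * Linux or nvgpu-wrapped -- is permitted here. A hard-IRQ path
         * would need struct nvgpu_spinlock instead. */
        static irqreturn_t pmu_isr_thread(int irq, void *data)
        {
                struct gk20a *g = data;

                gk20a_pmu_isr(g);       /* acquires pmu->isr_mutex */
                return IRQ_HANDLED;
        }

The isr_mutex also guards the isr_enabled flag: the ISR takes the mutex, checks the flag, and releases-and-returns if the PMU is torn down, which is why gk20a_pmu_destroy() flips the flag under the same lock. Unrelated to the conversion, the hunks also preserve a pre-existing quirk in pmu_seq_acquire(): find_first_zero_bit() takes its limit in bits, while sizeof() yields bytes, so the search covers fewer sequence slots than the table holds.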
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h index e4513457..c1583eab 100644 --- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h | |||
@@ -136,7 +136,7 @@ struct pmu_queue { | |||
136 | u32 mutex_id; | 136 | u32 mutex_id; |
137 | u32 mutex_lock; | 137 | u32 mutex_lock; |
138 | /* used by sw, for LPQ/HPQ queue */ | 138 | /* used by sw, for LPQ/HPQ queue */ |
139 | struct mutex mutex; | 139 | struct nvgpu_mutex mutex; |
140 | 140 | ||
141 | /* current write position */ | 141 | /* current write position */ |
142 | u32 position; | 142 | u32 position; |
@@ -334,8 +334,8 @@ struct pmu_gk20a { | |||
334 | struct pmu_mutex *mutex; | 334 | struct pmu_mutex *mutex; |
335 | u32 mutex_cnt; | 335 | u32 mutex_cnt; |
336 | 336 | ||
337 | struct mutex pmu_copy_lock; | 337 | struct nvgpu_mutex pmu_copy_lock; |
338 | struct mutex pmu_seq_lock; | 338 | struct nvgpu_mutex pmu_seq_lock; |
339 | 339 | ||
340 | struct nvgpu_allocator dmem; | 340 | struct nvgpu_allocator dmem; |
341 | 341 | ||
@@ -355,8 +355,8 @@ struct pmu_gk20a { | |||
355 | 355 | ||
356 | #define PMU_ELPG_ENABLE_ALLOW_DELAY_MSEC 1 /* msec */ | 356 | #define PMU_ELPG_ENABLE_ALLOW_DELAY_MSEC 1 /* msec */ |
357 | struct work_struct pg_init; | 357 | struct work_struct pg_init; |
358 | struct mutex pg_mutex; /* protect pg-RPPG/MSCG enable/disable */ | 358 | struct nvgpu_mutex pg_mutex; /* protect pg-RPPG/MSCG enable/disable */ |
359 | struct mutex elpg_mutex; /* protect elpg enable/disable */ | 359 | struct nvgpu_mutex elpg_mutex; /* protect elpg enable/disable */ |
360 | int elpg_refcnt; /* disable -1, enable +1, <=0 elpg disabled, > 0 elpg enabled */ | 360 | int elpg_refcnt; /* disable -1, enable +1, <=0 elpg disabled, > 0 elpg enabled */ |
361 | 361 | ||
362 | union { | 362 | union { |
@@ -375,7 +375,7 @@ struct pmu_gk20a { | |||
375 | u32 load_shadow; | 375 | u32 load_shadow; |
376 | u32 load_avg; | 376 | u32 load_avg; |
377 | 377 | ||
378 | struct mutex isr_mutex; | 378 | struct nvgpu_mutex isr_mutex; |
379 | bool isr_enabled; | 379 | bool isr_enabled; |
380 | 380 | ||
381 | bool zbc_ready; | 381 | bool zbc_ready; |
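pmu_queue carries two unrelated "mutex" notions that are easy to conflate: mutex_id and mutex_lock refer to a PMU hardware mutex (an id plus an owner token), while the struct nvgpu_mutex mutex is the software lock used only for the LPQ/HPQ command queues, and only the latter is touched by this conversion. The split is visible in pmu_queue_lock()/pmu_queue_unlock() above: SW command queues take the nvgpu mutex and return, everything else falls through to the hardware-mutex path, which is elided in the hunks. Sketch of that shape (condensed; the HW call below is the driver's existing helper, shown here as an assumption):

        if (PMU_IS_SW_COMMAND_QUEUE(queue->id)) {
                nvgpu_mutex_acquire(&queue->mutex);     /* SW lock (this change) */
                return 0;
        }
        /* HW mutex path, untouched by this change */
        return pmu_mutex_acquire(pmu, queue->mutex_id, &queue->mutex_lock);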
diff --git a/drivers/gpu/nvgpu/gk20a/sched_gk20a.c b/drivers/gpu/nvgpu/gk20a/sched_gk20a.c index 20cd1232..6fdc2774 100644 --- a/drivers/gpu/nvgpu/gk20a/sched_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/sched_gk20a.c | |||
@@ -46,29 +46,29 @@ ssize_t gk20a_sched_dev_read(struct file *filp, char __user *buf, | |||
46 | return -EINVAL; | 46 | return -EINVAL; |
47 | size = sizeof(event); | 47 | size = sizeof(event); |
48 | 48 | ||
49 | mutex_lock(&sched->status_lock); | 49 | nvgpu_mutex_acquire(&sched->status_lock); |
50 | while (!sched->status) { | 50 | while (!sched->status) { |
51 | mutex_unlock(&sched->status_lock); | 51 | nvgpu_mutex_release(&sched->status_lock); |
52 | if (filp->f_flags & O_NONBLOCK) | 52 | if (filp->f_flags & O_NONBLOCK) |
53 | return -EAGAIN; | 53 | return -EAGAIN; |
54 | err = wait_event_interruptible(sched->readout_wq, | 54 | err = wait_event_interruptible(sched->readout_wq, |
55 | sched->status); | 55 | sched->status); |
56 | if (err) | 56 | if (err) |
57 | return err; | 57 | return err; |
58 | mutex_lock(&sched->status_lock); | 58 | nvgpu_mutex_acquire(&sched->status_lock); |
59 | } | 59 | } |
60 | 60 | ||
61 | event.reserved = 0; | 61 | event.reserved = 0; |
62 | event.status = sched->status; | 62 | event.status = sched->status; |
63 | 63 | ||
64 | if (copy_to_user(buf, &event, size)) { | 64 | if (copy_to_user(buf, &event, size)) { |
65 | mutex_unlock(&sched->status_lock); | 65 | nvgpu_mutex_release(&sched->status_lock); |
66 | return -EFAULT; | 66 | return -EFAULT; |
67 | } | 67 | } |
68 | 68 | ||
69 | sched->status = 0; | 69 | sched->status = 0; |
70 | 70 | ||
71 | mutex_unlock(&sched->status_lock); | 71 | nvgpu_mutex_release(&sched->status_lock); |
72 | 72 | ||
73 | return size; | 73 | return size; |
74 | } | 74 | } |
@@ -80,11 +80,11 @@ unsigned int gk20a_sched_dev_poll(struct file *filp, poll_table *wait) | |||
80 | 80 | ||
81 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, ""); | 81 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, ""); |
82 | 82 | ||
83 | mutex_lock(&sched->status_lock); | 83 | nvgpu_mutex_acquire(&sched->status_lock); |
84 | poll_wait(filp, &sched->readout_wq, wait); | 84 | poll_wait(filp, &sched->readout_wq, wait); |
85 | if (sched->status) | 85 | if (sched->status) |
86 | mask |= POLLIN | POLLRDNORM; | 86 | mask |= POLLIN | POLLRDNORM; |
87 | mutex_unlock(&sched->status_lock); | 87 | nvgpu_mutex_release(&sched->status_lock); |
88 | 88 | ||
89 | return mask; | 89 | return mask; |
90 | } | 90 | } |
@@ -100,13 +100,13 @@ static int gk20a_sched_dev_ioctl_get_tsgs(struct gk20a_sched_ctrl *sched, | |||
100 | return -ENOSPC; | 100 | return -ENOSPC; |
101 | } | 101 | } |
102 | 102 | ||
103 | mutex_lock(&sched->status_lock); | 103 | nvgpu_mutex_acquire(&sched->status_lock); |
104 | if (copy_to_user((void __user *)(uintptr_t)arg->buffer, | 104 | if (copy_to_user((void __user *)(uintptr_t)arg->buffer, |
105 | sched->active_tsg_bitmap, sched->bitmap_size)) { | 105 | sched->active_tsg_bitmap, sched->bitmap_size)) { |
106 | mutex_unlock(&sched->status_lock); | 106 | nvgpu_mutex_release(&sched->status_lock); |
107 | return -EFAULT; | 107 | return -EFAULT; |
108 | } | 108 | } |
109 | mutex_unlock(&sched->status_lock); | 109 | nvgpu_mutex_release(&sched->status_lock); |
110 | 110 | ||
111 | return 0; | 111 | return 0; |
112 | } | 112 | } |
@@ -122,15 +122,15 @@ static int gk20a_sched_dev_ioctl_get_recent_tsgs(struct gk20a_sched_ctrl *sched, | |||
122 | return -ENOSPC; | 122 | return -ENOSPC; |
123 | } | 123 | } |
124 | 124 | ||
125 | mutex_lock(&sched->status_lock); | 125 | nvgpu_mutex_acquire(&sched->status_lock); |
126 | if (copy_to_user((void __user *)(uintptr_t)arg->buffer, | 126 | if (copy_to_user((void __user *)(uintptr_t)arg->buffer, |
127 | sched->recent_tsg_bitmap, sched->bitmap_size)) { | 127 | sched->recent_tsg_bitmap, sched->bitmap_size)) { |
128 | mutex_unlock(&sched->status_lock); | 128 | nvgpu_mutex_release(&sched->status_lock); |
129 | return -EFAULT; | 129 | return -EFAULT; |
130 | } | 130 | } |
131 | 131 | ||
132 | memset(sched->recent_tsg_bitmap, 0, sched->bitmap_size); | 132 | memset(sched->recent_tsg_bitmap, 0, sched->bitmap_size); |
133 | mutex_unlock(&sched->status_lock); | 133 | nvgpu_mutex_release(&sched->status_lock); |
134 | 134 | ||
135 | return 0; | 135 | return 0; |
136 | } | 136 | } |
@@ -158,7 +158,7 @@ static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched, | |||
158 | if (!bitmap) | 158 | if (!bitmap) |
159 | return -ENOMEM; | 159 | return -ENOMEM; |
160 | 160 | ||
161 | mutex_lock(&sched->status_lock); | 161 | nvgpu_mutex_acquire(&sched->status_lock); |
162 | for (tsgid = 0; tsgid < f->num_channels; tsgid++) { | 162 | for (tsgid = 0; tsgid < f->num_channels; tsgid++) { |
163 | if (NVGPU_SCHED_ISSET(tsgid, sched->active_tsg_bitmap)) { | 163 | if (NVGPU_SCHED_ISSET(tsgid, sched->active_tsg_bitmap)) { |
164 | tsg = &f->tsg[tsgid]; | 164 | tsg = &f->tsg[tsgid]; |
@@ -166,7 +166,7 @@ static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched, | |||
166 | NVGPU_SCHED_SET(tsgid, bitmap); | 166 | NVGPU_SCHED_SET(tsgid, bitmap); |
167 | } | 167 | } |
168 | } | 168 | } |
169 | mutex_unlock(&sched->status_lock); | 169 | nvgpu_mutex_release(&sched->status_lock); |
170 | 170 | ||
171 | if (copy_to_user((void __user *)(uintptr_t)arg->buffer, | 171 | if (copy_to_user((void __user *)(uintptr_t)arg->buffer, |
172 | bitmap, sched->bitmap_size)) | 172 | bitmap, sched->bitmap_size)) |
@@ -283,9 +283,9 @@ static int gk20a_sched_dev_ioctl_lock_control(struct gk20a_sched_ctrl *sched) | |||
283 | { | 283 | { |
284 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, ""); | 284 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, ""); |
285 | 285 | ||
286 | mutex_lock(&sched->control_lock); | 286 | nvgpu_mutex_acquire(&sched->control_lock); |
287 | sched->control_locked = true; | 287 | sched->control_locked = true; |
288 | mutex_unlock(&sched->control_lock); | 288 | nvgpu_mutex_release(&sched->control_lock); |
289 | return 0; | 289 | return 0; |
290 | } | 290 | } |
291 | 291 | ||
@@ -293,9 +293,9 @@ static int gk20a_sched_dev_ioctl_unlock_control(struct gk20a_sched_ctrl *sched) | |||
293 | { | 293 | { |
294 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, ""); | 294 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, ""); |
295 | 295 | ||
296 | mutex_lock(&sched->control_lock); | 296 | nvgpu_mutex_acquire(&sched->control_lock); |
297 | sched->control_locked = false; | 297 | sched->control_locked = false; |
298 | mutex_unlock(&sched->control_lock); | 298 | nvgpu_mutex_release(&sched->control_lock); |
299 | return 0; | 299 | return 0; |
300 | } | 300 | } |
301 | 301 | ||
@@ -325,12 +325,12 @@ static int gk20a_sched_dev_ioctl_get_tsg(struct gk20a_sched_ctrl *sched, | |||
325 | if (!kref_get_unless_zero(&tsg->refcount)) | 325 | if (!kref_get_unless_zero(&tsg->refcount)) |
326 | return -ENXIO; | 326 | return -ENXIO; |
327 | 327 | ||
328 | mutex_lock(&sched->status_lock); | 328 | nvgpu_mutex_acquire(&sched->status_lock); |
329 | if (NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) { | 329 | if (NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) { |
330 | gk20a_warn(dev_from_gk20a(g), | 330 | gk20a_warn(dev_from_gk20a(g), |
331 | "tsgid=%d already referenced", tsgid); | 331 | "tsgid=%d already referenced", tsgid); |
332 | /* unlock status_lock as gk20a_tsg_release locks it */ | 332 | /* unlock status_lock as gk20a_tsg_release locks it */ |
333 | mutex_unlock(&sched->status_lock); | 333 | nvgpu_mutex_release(&sched->status_lock); |
334 | kref_put(&tsg->refcount, gk20a_tsg_release); | 334 | kref_put(&tsg->refcount, gk20a_tsg_release); |
335 | return -ENXIO; | 335 | return -ENXIO; |
336 | } | 336 | } |
@@ -339,7 +339,7 @@ static int gk20a_sched_dev_ioctl_get_tsg(struct gk20a_sched_ctrl *sched, | |||
339 | * NVGPU_SCHED_IOCTL_PUT_TSG ioctl, or close | 339 | * NVGPU_SCHED_IOCTL_PUT_TSG ioctl, or close |
340 | */ | 340 | */ |
341 | NVGPU_SCHED_SET(tsgid, sched->ref_tsg_bitmap); | 341 | NVGPU_SCHED_SET(tsgid, sched->ref_tsg_bitmap); |
342 | mutex_unlock(&sched->status_lock); | 342 | nvgpu_mutex_release(&sched->status_lock); |
343 | 343 | ||
344 | return 0; | 344 | return 0; |
345 | } | 345 | } |
@@ -357,15 +357,15 @@ static int gk20a_sched_dev_ioctl_put_tsg(struct gk20a_sched_ctrl *sched, | |||
357 | if (tsgid >= f->num_channels) | 357 | if (tsgid >= f->num_channels) |
358 | return -EINVAL; | 358 | return -EINVAL; |
359 | 359 | ||
360 | mutex_lock(&sched->status_lock); | 360 | nvgpu_mutex_acquire(&sched->status_lock); |
361 | if (!NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) { | 361 | if (!NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) { |
362 | mutex_unlock(&sched->status_lock); | 362 | nvgpu_mutex_release(&sched->status_lock); |
363 | gk20a_warn(dev_from_gk20a(g), | 363 | gk20a_warn(dev_from_gk20a(g), |
364 | "tsgid=%d not previously referenced", tsgid); | 364 | "tsgid=%d not previously referenced", tsgid); |
365 | return -ENXIO; | 365 | return -ENXIO; |
366 | } | 366 | } |
367 | NVGPU_SCHED_CLR(tsgid, sched->ref_tsg_bitmap); | 367 | NVGPU_SCHED_CLR(tsgid, sched->ref_tsg_bitmap); |
368 | mutex_unlock(&sched->status_lock); | 368 | nvgpu_mutex_release(&sched->status_lock); |
369 | 369 | ||
370 | tsg = &f->tsg[tsgid]; | 370 | tsg = &f->tsg[tsgid]; |
371 | kref_put(&tsg->refcount, gk20a_tsg_release); | 371 | kref_put(&tsg->refcount, gk20a_tsg_release); |
@@ -390,7 +390,7 @@ int gk20a_sched_dev_open(struct inode *inode, struct file *filp) | |||
390 | gk20a_idle(g->dev); | 390 | gk20a_idle(g->dev); |
391 | } | 391 | } |
392 | 392 | ||
393 | if (!mutex_trylock(&sched->busy_lock)) | 393 | if (!nvgpu_mutex_tryacquire(&sched->busy_lock)) |
394 | return -EBUSY; | 394 | return -EBUSY; |
395 | 395 | ||
396 | memcpy(sched->recent_tsg_bitmap, sched->active_tsg_bitmap, | 396 | memcpy(sched->recent_tsg_bitmap, sched->active_tsg_bitmap, |
@@ -506,11 +506,11 @@ int gk20a_sched_dev_release(struct inode *inode, struct file *filp) | |||
506 | } | 506 | } |
507 | 507 | ||
508 | /* unlock control */ | 508 | /* unlock control */ |
509 | mutex_lock(&sched->control_lock); | 509 | nvgpu_mutex_acquire(&sched->control_lock); |
510 | sched->control_locked = false; | 510 | sched->control_locked = false; |
511 | mutex_unlock(&sched->control_lock); | 511 | nvgpu_mutex_release(&sched->control_lock); |
512 | 512 | ||
513 | mutex_unlock(&sched->busy_lock); | 513 | nvgpu_mutex_release(&sched->busy_lock); |
514 | return 0; | 514 | return 0; |
515 | } | 515 | } |
516 | 516 | ||
@@ -530,16 +530,16 @@ static int gk20a_sched_debugfs_show(struct seq_file *s, void *unused) | |||
530 | if (err) | 530 | if (err) |
531 | return err; | 531 | return err; |
532 | 532 | ||
533 | if (mutex_trylock(&sched->busy_lock)) { | 533 | if (nvgpu_mutex_tryacquire(&sched->busy_lock)) { |
534 | sched_busy = false; | 534 | sched_busy = false; |
535 | mutex_unlock(&sched->busy_lock); | 535 | nvgpu_mutex_release(&sched->busy_lock); |
536 | } | 536 | } |
537 | 537 | ||
538 | seq_printf(s, "control_locked=%d\n", sched->control_locked); | 538 | seq_printf(s, "control_locked=%d\n", sched->control_locked); |
539 | seq_printf(s, "busy=%d\n", sched_busy); | 539 | seq_printf(s, "busy=%d\n", sched_busy); |
540 | seq_printf(s, "bitmap_size=%zu\n", sched->bitmap_size); | 540 | seq_printf(s, "bitmap_size=%zu\n", sched->bitmap_size); |
541 | 541 | ||
542 | mutex_lock(&sched->status_lock); | 542 | nvgpu_mutex_acquire(&sched->status_lock); |
543 | 543 | ||
544 | seq_puts(s, "active_tsg_bitmap\n"); | 544 | seq_puts(s, "active_tsg_bitmap\n"); |
545 | for (i = 0; i < n; i++) | 545 | for (i = 0; i < n; i++) |
@@ -549,7 +549,7 @@ static int gk20a_sched_debugfs_show(struct seq_file *s, void *unused) | |||
549 | for (i = 0; i < n; i++) | 549 | for (i = 0; i < n; i++) |
550 | seq_printf(s, "\t0x%016llx\n", sched->recent_tsg_bitmap[i]); | 550 | seq_printf(s, "\t0x%016llx\n", sched->recent_tsg_bitmap[i]); |
551 | 551 | ||
552 | mutex_unlock(&sched->status_lock); | 552 | nvgpu_mutex_release(&sched->status_lock); |
553 | 553 | ||
554 | gk20a_idle(g->dev); | 554 | gk20a_idle(g->dev); |
555 | 555 | ||
@@ -594,11 +594,11 @@ void gk20a_sched_ctrl_tsg_added(struct gk20a *g, struct tsg_gk20a *tsg) | |||
594 | gk20a_idle(g->dev); | 594 | gk20a_idle(g->dev); |
595 | } | 595 | } |
596 | 596 | ||
597 | mutex_lock(&sched->status_lock); | 597 | nvgpu_mutex_acquire(&sched->status_lock); |
598 | NVGPU_SCHED_SET(tsg->tsgid, sched->active_tsg_bitmap); | 598 | NVGPU_SCHED_SET(tsg->tsgid, sched->active_tsg_bitmap); |
599 | NVGPU_SCHED_SET(tsg->tsgid, sched->recent_tsg_bitmap); | 599 | NVGPU_SCHED_SET(tsg->tsgid, sched->recent_tsg_bitmap); |
600 | sched->status |= NVGPU_SCHED_STATUS_TSG_OPEN; | 600 | sched->status |= NVGPU_SCHED_STATUS_TSG_OPEN; |
601 | mutex_unlock(&sched->status_lock); | 601 | nvgpu_mutex_release(&sched->status_lock); |
602 | wake_up_interruptible(&sched->readout_wq); | 602 | wake_up_interruptible(&sched->readout_wq); |
603 | } | 603 | } |
604 | 604 | ||
@@ -608,7 +608,7 @@ void gk20a_sched_ctrl_tsg_removed(struct gk20a *g, struct tsg_gk20a *tsg) | |||
608 | 608 | ||
609 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); | 609 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); |
610 | 610 | ||
611 | mutex_lock(&sched->status_lock); | 611 | nvgpu_mutex_acquire(&sched->status_lock); |
612 | NVGPU_SCHED_CLR(tsg->tsgid, sched->active_tsg_bitmap); | 612 | NVGPU_SCHED_CLR(tsg->tsgid, sched->active_tsg_bitmap); |
613 | 613 | ||
614 | /* clear recent_tsg_bitmap as well: if app manager did not | 614 | /* clear recent_tsg_bitmap as well: if app manager did not |
@@ -621,7 +621,7 @@ void gk20a_sched_ctrl_tsg_removed(struct gk20a *g, struct tsg_gk20a *tsg) | |||
621 | /* do not set event_pending, we only want to notify app manager | 621 | /* do not set event_pending, we only want to notify app manager |
622 | * when TSGs are added, so that it can apply sched params | 622 | * when TSGs are added, so that it can apply sched params |
623 | */ | 623 | */ |
624 | mutex_unlock(&sched->status_lock); | 624 | nvgpu_mutex_release(&sched->status_lock); |
625 | } | 625 | } |
626 | 626 | ||
627 | int gk20a_sched_ctrl_init(struct gk20a *g) | 627 | int gk20a_sched_ctrl_init(struct gk20a *g) |
@@ -652,9 +652,9 @@ int gk20a_sched_ctrl_init(struct gk20a *g) | |||
652 | goto free_recent; | 652 | goto free_recent; |
653 | 653 | ||
654 | init_waitqueue_head(&sched->readout_wq); | 654 | init_waitqueue_head(&sched->readout_wq); |
655 | mutex_init(&sched->status_lock); | 655 | nvgpu_mutex_init(&sched->status_lock); |
656 | mutex_init(&sched->control_lock); | 656 | nvgpu_mutex_init(&sched->control_lock); |
657 | mutex_init(&sched->busy_lock); | 657 | nvgpu_mutex_init(&sched->busy_lock); |
658 | 658 | ||
659 | sched->sw_ready = true; | 659 | sched->sw_ready = true; |
660 | 660 | ||
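Two locking patterns in sched_gk20a.c deserve a second look after the conversion. First, the read path must not sleep while holding status_lock, so it drops the lock around wait_event_interruptible() and retakes it before re-checking the predicate; condensed sketch of the loop (names from the hunks):

        nvgpu_mutex_acquire(&sched->status_lock);
        while (!sched->status) {
                nvgpu_mutex_release(&sched->status_lock);       /* never sleep with it held */
                if (filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;
                err = wait_event_interruptible(sched->readout_wq, sched->status);
                if (err)
                        return err;
                nvgpu_mutex_acquire(&sched->status_lock);       /* re-check under lock */
        }

Second, nvgpu_mutex_tryacquire() evidently keeps mutex_trylock() semantics (nonzero on success): gk20a_sched_dev_open() turns a failed tryacquire into -EBUSY to enforce a single opener, and the debugfs show path uses a successful tryacquire merely to probe, and immediately release, the busy_lock.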
diff --git a/drivers/gpu/nvgpu/gk20a/sched_gk20a.h b/drivers/gpu/nvgpu/gk20a/sched_gk20a.h index 0ae13783..1f983678 100644 --- a/drivers/gpu/nvgpu/gk20a/sched_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/sched_gk20a.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. | 2 | * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms and conditions of the GNU General Public License, | 5 | * under the terms and conditions of the GNU General Public License, |
@@ -21,11 +21,11 @@ struct tsg_gk20a; | |||
21 | struct gk20a_sched_ctrl { | 21 | struct gk20a_sched_ctrl { |
22 | struct gk20a *g; | 22 | struct gk20a *g; |
23 | 23 | ||
24 | struct mutex control_lock; | 24 | struct nvgpu_mutex control_lock; |
25 | bool control_locked; | 25 | bool control_locked; |
26 | bool sw_ready; | 26 | bool sw_ready; |
27 | struct mutex status_lock; | 27 | struct nvgpu_mutex status_lock; |
28 | struct mutex busy_lock; | 28 | struct nvgpu_mutex busy_lock; |
29 | 29 | ||
30 | u64 status; | 30 | u64 status; |
31 | 31 | ||
diff --git a/drivers/gpu/nvgpu/gk20a/sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/sync_gk20a.c index edfe3deb..f57871d5 100644 --- a/drivers/gpu/nvgpu/gk20a/sync_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/sync_gk20a.c | |||
@@ -20,8 +20,7 @@ | |||
20 | #include <linux/hrtimer.h> | 20 | #include <linux/hrtimer.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/spinlock.h> | 23 | #include <nvgpu/lock.h> |
24 | |||
25 | #include <uapi/linux/nvgpu.h> | 24 | #include <uapi/linux/nvgpu.h> |
26 | 25 | ||
27 | #include <nvgpu/semaphore.h> | 26 | #include <nvgpu/semaphore.h> |
@@ -55,7 +54,7 @@ struct gk20a_sync_pt { | |||
55 | * than a mutex - there should be very little contention on this | 54 | * than a mutex - there should be very little contention on this |
56 | * lock. | 55 | * lock. |
57 | */ | 56 | */ |
58 | spinlock_t lock; | 57 | struct nvgpu_spinlock lock; |
59 | }; | 58 | }; |
60 | 59 | ||
61 | struct gk20a_sync_pt_inst { | 60 | struct gk20a_sync_pt_inst { |
@@ -242,7 +241,7 @@ static struct gk20a_sync_pt *gk20a_sync_pt_create_shared( | |||
242 | } | 241 | } |
243 | } | 242 | } |
244 | 243 | ||
245 | spin_lock_init(&shared->lock); | 244 | nvgpu_spinlock_init(&shared->lock); |
246 | 245 | ||
247 | nvgpu_semaphore_get(sema); | 246 | nvgpu_semaphore_get(sema); |
248 | 247 | ||
@@ -304,7 +303,7 @@ static int gk20a_sync_pt_has_signaled(struct sync_pt *sync_pt) | |||
304 | #endif | 303 | #endif |
305 | bool signaled = true; | 304 | bool signaled = true; |
306 | 305 | ||
307 | spin_lock(&pt->lock); | 306 | nvgpu_spinlock_acquire(&pt->lock); |
308 | if (!pt->sema) | 307 | if (!pt->sema) |
309 | goto done; | 308 | goto done; |
310 | 309 | ||
@@ -345,7 +344,7 @@ static int gk20a_sync_pt_has_signaled(struct sync_pt *sync_pt) | |||
345 | pt->sema = NULL; | 344 | pt->sema = NULL; |
346 | } | 345 | } |
347 | done: | 346 | done: |
348 | spin_unlock(&pt->lock); | 347 | nvgpu_spinlock_release(&pt->lock); |
349 | 348 | ||
350 | return signaled; | 349 | return signaled; |
351 | } | 350 | } |
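sync_gk20a.c is the one file here that converts a spinlock rather than a mutex, and the retained comment explains why it stays a spinlock: the signal-check fast path is hot and nearly uncontended. The spinlock side of the wrapper API is symmetric with the mutex side; a minimal sketch, again assuming thin forwarding inlines (the actual header may differ):

        static inline void nvgpu_spinlock_init(struct nvgpu_spinlock *spinlock)
        {
                spin_lock_init(&spinlock->spinlock);
        }

        static inline void nvgpu_spinlock_acquire(struct nvgpu_spinlock *spinlock)
        {
                spin_lock(&spinlock->spinlock);
        }

        static inline void nvgpu_spinlock_release(struct nvgpu_spinlock *spinlock)
        {
                spin_unlock(&spinlock->spinlock);
        }

Note the include swap at the top of the file: once no Linux lock primitive is named directly, <linux/spinlock.h> can be dropped in favor of <nvgpu/lock.h>.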
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c index 96d6873d..aadf5463 100644 --- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c | |||
@@ -169,7 +169,7 @@ int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid) | |||
169 | init_rwsem(&tsg->ch_list_lock); | 169 | init_rwsem(&tsg->ch_list_lock); |
170 | 170 | ||
171 | INIT_LIST_HEAD(&tsg->event_id_list); | 171 | INIT_LIST_HEAD(&tsg->event_id_list); |
172 | mutex_init(&tsg->event_id_list_lock); | 172 | nvgpu_mutex_init(&tsg->event_id_list_lock); |
173 | 173 | ||
174 | return 0; | 174 | return 0; |
175 | } | 175 | } |
@@ -204,7 +204,7 @@ static int gk20a_tsg_get_event_data_from_id(struct tsg_gk20a *tsg, | |||
204 | struct gk20a_event_id_data *local_event_id_data; | 204 | struct gk20a_event_id_data *local_event_id_data; |
205 | bool event_found = false; | 205 | bool event_found = false; |
206 | 206 | ||
207 | mutex_lock(&tsg->event_id_list_lock); | 207 | nvgpu_mutex_acquire(&tsg->event_id_list_lock); |
208 | list_for_each_entry(local_event_id_data, &tsg->event_id_list, | 208 | list_for_each_entry(local_event_id_data, &tsg->event_id_list, |
209 | event_id_node) { | 209 | event_id_node) { |
210 | if (local_event_id_data->event_id == event_id) { | 210 | if (local_event_id_data->event_id == event_id) { |
@@ -212,7 +212,7 @@ static int gk20a_tsg_get_event_data_from_id(struct tsg_gk20a *tsg, | |||
212 | break; | 212 | break; |
213 | } | 213 | } |
214 | } | 214 | } |
215 | mutex_unlock(&tsg->event_id_list_lock); | 215 | nvgpu_mutex_release(&tsg->event_id_list_lock); |
216 | 216 | ||
217 | if (event_found) { | 217 | if (event_found) { |
218 | *event_id_data = local_event_id_data; | 218 | *event_id_data = local_event_id_data; |
@@ -233,7 +233,7 @@ void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg, | |||
233 | if (err) | 233 | if (err) |
234 | return; | 234 | return; |
235 | 235 | ||
236 | mutex_lock(&event_id_data->lock); | 236 | nvgpu_mutex_acquire(&event_id_data->lock); |
237 | 237 | ||
238 | gk20a_dbg_info( | 238 | gk20a_dbg_info( |
239 | "posting event for event_id=%d on tsg=%d\n", | 239 | "posting event for event_id=%d on tsg=%d\n", |
@@ -242,7 +242,7 @@ void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg, | |||
242 | 242 | ||
243 | wake_up_interruptible_all(&event_id_data->event_id_wq); | 243 | wake_up_interruptible_all(&event_id_data->event_id_wq); |
244 | 244 | ||
245 | mutex_unlock(&event_id_data->lock); | 245 | nvgpu_mutex_release(&event_id_data->lock); |
246 | } | 246 | } |
247 | 247 | ||
248 | static int gk20a_tsg_event_id_enable(struct tsg_gk20a *tsg, | 248 | static int gk20a_tsg_event_id_enable(struct tsg_gk20a *tsg, |
@@ -287,12 +287,12 @@ static int gk20a_tsg_event_id_enable(struct tsg_gk20a *tsg, | |||
287 | event_id_data->event_id = event_id; | 287 | event_id_data->event_id = event_id; |
288 | 288 | ||
289 | init_waitqueue_head(&event_id_data->event_id_wq); | 289 | init_waitqueue_head(&event_id_data->event_id_wq); |
290 | mutex_init(&event_id_data->lock); | 290 | nvgpu_mutex_init(&event_id_data->lock); |
291 | INIT_LIST_HEAD(&event_id_data->event_id_node); | 291 | INIT_LIST_HEAD(&event_id_data->event_id_node); |
292 | 292 | ||
293 | mutex_lock(&tsg->event_id_list_lock); | 293 | nvgpu_mutex_acquire(&tsg->event_id_list_lock); |
294 | list_add_tail(&event_id_data->event_id_node, &tsg->event_id_list); | 294 | list_add_tail(&event_id_data->event_id_node, &tsg->event_id_list); |
295 | mutex_unlock(&tsg->event_id_list_lock); | 295 | nvgpu_mutex_release(&tsg->event_id_list_lock); |
296 | 296 | ||
297 | fd_install(local_fd, file); | 297 | fd_install(local_fd, file); |
298 | file->private_data = event_id_data; | 298 | file->private_data = event_id_data; |
@@ -370,9 +370,9 @@ int gk20a_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice) | |||
370 | 370 | ||
371 | static void release_used_tsg(struct fifo_gk20a *f, struct tsg_gk20a *tsg) | 371 | static void release_used_tsg(struct fifo_gk20a *f, struct tsg_gk20a *tsg) |
372 | { | 372 | { |
373 | mutex_lock(&f->tsg_inuse_mutex); | 373 | nvgpu_mutex_acquire(&f->tsg_inuse_mutex); |
374 | f->tsg[tsg->tsgid].in_use = false; | 374 | f->tsg[tsg->tsgid].in_use = false; |
375 | mutex_unlock(&f->tsg_inuse_mutex); | 375 | nvgpu_mutex_release(&f->tsg_inuse_mutex); |
376 | } | 376 | } |
377 | 377 | ||
378 | static struct tsg_gk20a *acquire_unused_tsg(struct fifo_gk20a *f) | 378 | static struct tsg_gk20a *acquire_unused_tsg(struct fifo_gk20a *f) |
@@ -380,7 +380,7 @@ static struct tsg_gk20a *acquire_unused_tsg(struct fifo_gk20a *f) | |||
380 | struct tsg_gk20a *tsg = NULL; | 380 | struct tsg_gk20a *tsg = NULL; |
381 | unsigned int tsgid; | 381 | unsigned int tsgid; |
382 | 382 | ||
383 | mutex_lock(&f->tsg_inuse_mutex); | 383 | nvgpu_mutex_acquire(&f->tsg_inuse_mutex); |
384 | for (tsgid = 0; tsgid < f->num_channels; tsgid++) { | 384 | for (tsgid = 0; tsgid < f->num_channels; tsgid++) { |
385 | if (!f->tsg[tsgid].in_use) { | 385 | if (!f->tsg[tsgid].in_use) { |
386 | f->tsg[tsgid].in_use = true; | 386 | f->tsg[tsgid].in_use = true; |
@@ -388,7 +388,7 @@ static struct tsg_gk20a *acquire_unused_tsg(struct fifo_gk20a *f) | |||
388 | break; | 388 | break; |
389 | } | 389 | } |
390 | } | 390 | } |
391 | mutex_unlock(&f->tsg_inuse_mutex); | 391 | nvgpu_mutex_release(&f->tsg_inuse_mutex); |
392 | 392 | ||
393 | return tsg; | 393 | return tsg; |
394 | } | 394 | } |
@@ -482,13 +482,13 @@ void gk20a_tsg_release(struct kref *ref) | |||
482 | gk20a_sched_ctrl_tsg_removed(g, tsg); | 482 | gk20a_sched_ctrl_tsg_removed(g, tsg); |
483 | 483 | ||
484 | /* unhook all events created on this TSG */ | 484 | /* unhook all events created on this TSG */ |
485 | mutex_lock(&tsg->event_id_list_lock); | 485 | nvgpu_mutex_acquire(&tsg->event_id_list_lock); |
486 | list_for_each_entry_safe(event_id_data, event_id_data_temp, | 486 | list_for_each_entry_safe(event_id_data, event_id_data_temp, |
487 | &tsg->event_id_list, | 487 | &tsg->event_id_list, |
488 | event_id_node) { | 488 | event_id_node) { |
489 | list_del_init(&event_id_data->event_id_node); | 489 | list_del_init(&event_id_data->event_id_node); |
490 | } | 490 | } |
491 | mutex_unlock(&tsg->event_id_list_lock); | 491 | nvgpu_mutex_release(&tsg->event_id_list_lock); |
492 | 492 | ||
493 | release_used_tsg(&g->fifo, tsg); | 493 | release_used_tsg(&g->fifo, tsg); |
494 | 494 | ||
@@ -517,7 +517,7 @@ static int gk20a_tsg_ioctl_set_priority(struct gk20a *g, | |||
517 | struct gk20a_sched_ctrl *sched = &g->sched_ctrl; | 517 | struct gk20a_sched_ctrl *sched = &g->sched_ctrl; |
518 | int err; | 518 | int err; |
519 | 519 | ||
520 | mutex_lock(&sched->control_lock); | 520 | nvgpu_mutex_acquire(&sched->control_lock); |
521 | if (sched->control_locked) { | 521 | if (sched->control_locked) { |
522 | err = -EPERM; | 522 | err = -EPERM; |
523 | goto done; | 523 | goto done; |
@@ -533,7 +533,7 @@ static int gk20a_tsg_ioctl_set_priority(struct gk20a *g, | |||
533 | 533 | ||
534 | gk20a_idle(g->dev); | 534 | gk20a_idle(g->dev); |
535 | done: | 535 | done: |
536 | mutex_unlock(&sched->control_lock); | 536 | nvgpu_mutex_release(&sched->control_lock); |
537 | return err; | 537 | return err; |
538 | } | 538 | } |
539 | 539 | ||
@@ -545,7 +545,7 @@ static int gk20a_tsg_ioctl_set_runlist_interleave(struct gk20a *g, | |||
545 | 545 | ||
546 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); | 546 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); |
547 | 547 | ||
548 | mutex_lock(&sched->control_lock); | 548 | nvgpu_mutex_acquire(&sched->control_lock); |
549 | if (sched->control_locked) { | 549 | if (sched->control_locked) { |
550 | err = -EPERM; | 550 | err = -EPERM; |
551 | goto done; | 551 | goto done; |
@@ -560,7 +560,7 @@ static int gk20a_tsg_ioctl_set_runlist_interleave(struct gk20a *g, | |||
560 | 560 | ||
561 | gk20a_idle(g->dev); | 561 | gk20a_idle(g->dev); |
562 | done: | 562 | done: |
563 | mutex_unlock(&sched->control_lock); | 563 | nvgpu_mutex_release(&sched->control_lock); |
564 | return err; | 564 | return err; |
565 | } | 565 | } |
566 | 566 | ||
@@ -572,7 +572,7 @@ static int gk20a_tsg_ioctl_set_timeslice(struct gk20a *g, | |||
572 | 572 | ||
573 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); | 573 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); |
574 | 574 | ||
575 | mutex_lock(&sched->control_lock); | 575 | nvgpu_mutex_acquire(&sched->control_lock); |
576 | if (sched->control_locked) { | 576 | if (sched->control_locked) { |
577 | err = -EPERM; | 577 | err = -EPERM; |
578 | goto done; | 578 | goto done; |
@@ -585,7 +585,7 @@ static int gk20a_tsg_ioctl_set_timeslice(struct gk20a *g, | |||
585 | err = gk20a_tsg_set_timeslice(tsg, arg->timeslice_us); | 585 | err = gk20a_tsg_set_timeslice(tsg, arg->timeslice_us); |
586 | gk20a_idle(g->dev); | 586 | gk20a_idle(g->dev); |
587 | done: | 587 | done: |
588 | mutex_unlock(&sched->control_lock); | 588 | nvgpu_mutex_release(&sched->control_lock); |
589 | return err; | 589 | return err; |
590 | } | 590 | } |
591 | 591 | ||
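tsg_gk20a.c applies the conversion to a small slot allocator: tsg_inuse_mutex serializes the scan for a free TSG entry and the flag flip that claims it, so allocation and release are each just a short critical section around in_use. Condensed sketch (names from the hunks):

        nvgpu_mutex_acquire(&f->tsg_inuse_mutex);
        for (tsgid = 0; tsgid < f->num_channels; tsgid++) {
                if (!f->tsg[tsgid].in_use) {
                        f->tsg[tsgid].in_use = true;    /* claim under the lock */
                        tsg = &f->tsg[tsgid];
                        break;
                }
        }
        nvgpu_mutex_release(&f->tsg_inuse_mutex);

The matching release path clears in_use under the same mutex, so the pair never needs anything stronger than a process-context mutex.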
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h index a6642682..f95ae008 100644 --- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h | |||
@@ -16,6 +16,8 @@ | |||
16 | #ifndef __TSG_GK20A_H_ | 16 | #ifndef __TSG_GK20A_H_ |
17 | #define __TSG_GK20A_H_ | 17 | #define __TSG_GK20A_H_ |
18 | 18 | ||
19 | #include <nvgpu/lock.h> | ||
20 | |||
19 | #define NVGPU_INVALID_TSG_ID (-1) | 21 | #define NVGPU_INVALID_TSG_ID (-1) |
20 | 22 | ||
21 | struct channel_gk20a; | 23 | struct channel_gk20a; |
@@ -58,7 +60,7 @@ struct tsg_gk20a { | |||
58 | u32 interleave_level; | 60 | u32 interleave_level; |
59 | 61 | ||
60 | struct list_head event_id_list; | 62 | struct list_head event_id_list; |
61 | struct mutex event_id_list_lock; | 63 | struct nvgpu_mutex event_id_list_lock; |
62 | 64 | ||
63 | u32 runlist_id; | 65 | u32 runlist_id; |
64 | pid_t tgid; | 66 | pid_t tgid; |