Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/cde_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/cde_gk20a.c	50
1 file changed, 25 insertions(+), 25 deletions(-)
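
This patch mechanically converts the CDE code from the raw Linux mutex_lock()/mutex_unlock()/mutex_init() calls to the nvgpu locking wrappers nvgpu_mutex_acquire()/nvgpu_mutex_release()/nvgpu_mutex_init(), keeping the driver core independent of the Linux locking primitives. For orientation only, the Linux backend of these wrappers can be sketched as below; the struct layout, header location, and return type are assumptions for illustration, not the actual nvgpu definitions.

/* Hypothetical sketch of the nvgpu locking wrappers on Linux.
 * The real definitions live in the nvgpu OS-abstraction headers;
 * everything below is illustrative, not the shipped implementation.
 */
#include <linux/mutex.h>

struct nvgpu_mutex {
	struct mutex mutex;		/* assumed: wraps the Linux mutex */
};

static inline int nvgpu_mutex_init(struct nvgpu_mutex *mutex)
{
	mutex_init(&mutex->mutex);
	return 0;			/* assumed: init cannot fail on Linux */
}

static inline void nvgpu_mutex_acquire(struct nvgpu_mutex *mutex)
{
	mutex_lock(&mutex->mutex);
}

static inline void nvgpu_mutex_release(struct nvgpu_mutex *mutex)
{
	mutex_unlock(&mutex->mutex);
}

With wrappers of this shape, the call sites below change one-for-one: mutex_lock() becomes nvgpu_mutex_acquire(), mutex_unlock() becomes nvgpu_mutex_release(), and the single mutex_init() becomes nvgpu_mutex_init().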
diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
index 2a9ad40d..d43bc93f 100644
--- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
@@ -101,9 +101,9 @@ __acquires(&cde_app->mutex)
 		return;
 
 	if (wait_finish) {
-		mutex_unlock(&cde_app->mutex);
+		nvgpu_mutex_release(&cde_app->mutex);
 		cancel_delayed_work_sync(&cde_ctx->ctx_deleter_work);
-		mutex_lock(&cde_app->mutex);
+		nvgpu_mutex_acquire(&cde_app->mutex);
 	} else {
 		cancel_delayed_work(&cde_ctx->ctx_deleter_work);
 	}
@@ -152,9 +152,9 @@ __releases(&cde_app->mutex)
 	if (!cde_app->initialised)
 		return;
 
-	mutex_lock(&cde_app->mutex);
+	nvgpu_mutex_acquire(&cde_app->mutex);
 	gk20a_cde_stop(g);
-	mutex_unlock(&cde_app->mutex);
+	nvgpu_mutex_release(&cde_app->mutex);
 }
 
 void gk20a_cde_suspend(struct gk20a *g)
@@ -167,7 +167,7 @@ __releases(&cde_app->mutex)
 	if (!cde_app->initialised)
 		return;
 
-	mutex_lock(&cde_app->mutex);
+	nvgpu_mutex_acquire(&cde_app->mutex);
 
 	list_for_each_entry_safe(cde_ctx, cde_ctx_save,
 			&cde_app->free_contexts, list) {
@@ -179,7 +179,7 @@ __releases(&cde_app->mutex)
 		gk20a_cde_cancel_deleter(cde_ctx, false);
 	}
 
-	mutex_unlock(&cde_app->mutex);
+	nvgpu_mutex_release(&cde_app->mutex);
 
 }
 
@@ -739,7 +739,7 @@ __releases(&cde_app->mutex)
 	gk20a_dbg(gpu_dbg_cde_ctx, "releasing use on %p", cde_ctx);
 	trace_gk20a_cde_release(cde_ctx);
 
-	mutex_lock(&cde_app->mutex);
+	nvgpu_mutex_acquire(&cde_app->mutex);
 
 	if (cde_ctx->in_use) {
 		cde_ctx->in_use = false;
@@ -749,7 +749,7 @@ __releases(&cde_app->mutex)
 		gk20a_dbg_info("double release cde context %p", cde_ctx);
 	}
 
-	mutex_unlock(&cde_app->mutex);
+	nvgpu_mutex_release(&cde_app->mutex);
 }
 
 static void gk20a_cde_ctx_deleter_fn(struct work_struct *work)
@@ -779,7 +779,7 @@ __releases(&cde_app->mutex)
 		return;
 	}
 
-	mutex_lock(&cde_app->mutex);
+	nvgpu_mutex_acquire(&cde_app->mutex);
 	if (cde_ctx->in_use || !cde_app->initialised) {
 		gk20a_dbg(gpu_dbg_cde_ctx,
 				"cde: context use raced, not deleting %p",
@@ -797,7 +797,7 @@ __releases(&cde_app->mutex)
 			cde_app->ctx_count_top);
 
 out:
-	mutex_unlock(&cde_app->mutex);
+	nvgpu_mutex_release(&cde_app->mutex);
 	gk20a_idle(dev);
 }
 
@@ -876,9 +876,9 @@ __acquires(&cde_app->mutex)
 			break;
 
 		/* exhausted, retry */
-		mutex_unlock(&cde_app->mutex);
+		nvgpu_mutex_release(&cde_app->mutex);
 		cond_resched();
-		mutex_lock(&cde_app->mutex);
+		nvgpu_mutex_acquire(&cde_app->mutex);
 	} while (!nvgpu_timeout_expired(&timeout));
 
 	return cde_ctx;
@@ -946,7 +946,7 @@ __releases(&cde_app->mutex)
 	    scatterbuffer_byte_offset < compbits_byte_offset)
 		return -EINVAL;
 
-	mutex_lock(&g->cde_app.mutex);
+	nvgpu_mutex_acquire(&g->cde_app.mutex);
 
 	cde_ctx = gk20a_cde_get_context(g);
 	if (IS_ERR(cde_ctx)) {
@@ -1118,7 +1118,7 @@ exit_unlock:
 	if (surface)
 		dma_buf_vunmap(compbits_scatter_buf, surface);
 
-	mutex_unlock(&g->cde_app.mutex);
+	nvgpu_mutex_release(&g->cde_app.mutex);
 	return err;
 }
 
@@ -1155,13 +1155,13 @@ __releases(&cde_app->mutex)
1155 "cde: channel had timed out" 1155 "cde: channel had timed out"
1156 ", reloading"); 1156 ", reloading");
1157 /* mark it to be deleted, replace with a new one */ 1157 /* mark it to be deleted, replace with a new one */
1158 mutex_lock(&cde_app->mutex); 1158 nvgpu_mutex_acquire(&cde_app->mutex);
1159 cde_ctx->is_temporary = true; 1159 cde_ctx->is_temporary = true;
1160 if (gk20a_cde_create_context(g)) { 1160 if (gk20a_cde_create_context(g)) {
1161 gk20a_err(cde_ctx->dev, 1161 gk20a_err(cde_ctx->dev,
1162 "cde: can't replace context"); 1162 "cde: can't replace context");
1163 } 1163 }
1164 mutex_unlock(&cde_app->mutex); 1164 nvgpu_mutex_release(&cde_app->mutex);
1165 } 1165 }
1166 } 1166 }
1167 1167
@@ -1274,7 +1274,7 @@ __releases(&cde_app->mutex)
 	if (err)
 		return err;
 
-	mutex_lock(&cde_app->mutex);
+	nvgpu_mutex_acquire(&cde_app->mutex);
 
 	gk20a_cde_stop(g);
 
@@ -1282,7 +1282,7 @@ __releases(&cde_app->mutex)
 	if (!err)
 		cde_app->initialised = true;
 
-	mutex_unlock(&cde_app->mutex);
+	nvgpu_mutex_release(&cde_app->mutex);
 
 	gk20a_idle(g->dev);
 	return err;
@@ -1300,8 +1300,8 @@ __releases(&cde_app->mutex)
 
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: init");
 
-	mutex_init(&cde_app->mutex);
-	mutex_lock(&cde_app->mutex);
+	nvgpu_mutex_init(&cde_app->mutex);
+	nvgpu_mutex_acquire(&cde_app->mutex);
 
 	INIT_LIST_HEAD(&cde_app->free_contexts);
 	INIT_LIST_HEAD(&cde_app->used_contexts);
@@ -1313,7 +1313,7 @@ __releases(&cde_app->mutex)
 	if (!err)
 		cde_app->initialised = true;
 
-	mutex_unlock(&cde_app->mutex);
+	nvgpu_mutex_release(&cde_app->mutex);
 	gk20a_dbg(gpu_dbg_cde_ctx, "cde: init finished: %d", err);
 	return err;
 }
@@ -1561,7 +1561,7 @@ int gk20a_prepare_compressible_read(
 
 	missing_bits = (state->valid_compbits ^ request) & request;
 
-	mutex_lock(&state->lock);
+	nvgpu_mutex_acquire(&state->lock);
 
 	if (state->valid_compbits && request == NVGPU_GPU_COMPBITS_NONE) {
 
@@ -1599,7 +1599,7 @@ int gk20a_prepare_compressible_read(
 		*zbc_color = state->zbc_color;
 
 out:
-	mutex_unlock(&state->lock);
+	nvgpu_mutex_release(&state->lock);
 	dma_buf_put(dmabuf);
 	return err;
 }
@@ -1624,7 +1624,7 @@ int gk20a_mark_compressible_write(struct gk20a *g, u32 buffer_fd,
 		return err;
 	}
 
-	mutex_lock(&state->lock);
+	nvgpu_mutex_acquire(&state->lock);
 
 	/* Update the compbits state. */
 	state->valid_compbits = valid_compbits;
@@ -1634,7 +1634,7 @@ int gk20a_mark_compressible_write(struct gk20a *g, u32 buffer_fd,
 	gk20a_fence_put(state->fence);
 	state->fence = NULL;
 
-	mutex_unlock(&state->lock);
+	nvgpu_mutex_release(&state->lock);
 	dma_buf_put(dmabuf);
 	return 0;
 }