author	Terje Bergstrom <tbergstrom@nvidia.com>	2017-09-28 15:32:18 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-10-14 17:13:18 -0400
commit	7685f60d9dd6ed062f3037d4e72ea124c103d211
tree	a30185cde3b5f4fe1f4b657c7708e2fe8e07ee96 /drivers/gpu/nvgpu/gk20a/channel_gk20a.c
parent	c7e0c7e7ce045a9bef53f3fa11377ddb23bd79a3
gpu: nvgpu: Abstract rw_semaphore implementation
Abstract the rw_semaphore implementation behind an nvgpu interface. On Linux the new interface is implemented in terms of the kernel's rw_semaphore. Change deterministic_busy to use the new implementation.

JIRA NVGPU-259

Change-Id: Ia9c1b6e397581bff7711c5ab6fb76ef6d23cff87
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1570405
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
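The nvgpu_rwsem_*() calls in the diff below come from the abstraction this change introduces. As a rough sketch only (the struct layout, field name, and the init helper are assumptions, not taken from this patch), the Linux backend could simply wrap the kernel's rw_semaphore:

#include <linux/rwsem.h>

/* Sketch of an OS-agnostic rw_semaphore wrapper; everything except the
 * nvgpu_rwsem_* accessor names used in the diff below is illustrative. */
struct nvgpu_rwsem {
	struct rw_semaphore rwsem;
};

static inline void nvgpu_rwsem_init(struct nvgpu_rwsem *s)
{
	init_rwsem(&s->rwsem);
}

static inline void nvgpu_rwsem_down_read(struct nvgpu_rwsem *s)
{
	down_read(&s->rwsem);
}

static inline void nvgpu_rwsem_up_read(struct nvgpu_rwsem *s)
{
	up_read(&s->rwsem);
}

static inline void nvgpu_rwsem_down_write(struct nvgpu_rwsem *s)
{
	down_write(&s->rwsem);
}

static inline void nvgpu_rwsem_up_write(struct nvgpu_rwsem *s)
{
	up_write(&s->rwsem);
}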
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/channel_gk20a.c	24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index e3937afd..713c4215 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -602,10 +602,10 @@ unbind:
 
 	/* put back the channel-wide submit ref from init */
 	if (ch->deterministic) {
-		down_read(&g->deterministic_busy);
+		nvgpu_rwsem_down_read(&g->deterministic_busy);
 		ch->deterministic = false;
 		gk20a_idle(g);
-		up_read(&g->deterministic_busy);
+		nvgpu_rwsem_up_read(&g->deterministic_busy);
 	}
 
 	ch->vpr = false;
@@ -1268,7 +1268,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 		c->vpr = true;
 
 	if (flags & NVGPU_ALLOC_GPFIFO_EX_FLAGS_DETERMINISTIC) {
-		down_read(&g->deterministic_busy);
+		nvgpu_rwsem_down_read(&g->deterministic_busy);
 		/*
 		 * Railgating isn't deterministic; instead of disallowing
 		 * railgating globally, take a power refcount for this
@@ -1280,12 +1280,12 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 		 */
 		err = gk20a_busy(g);
 		if (err) {
-			up_read(&g->deterministic_busy);
+			nvgpu_rwsem_up_read(&g->deterministic_busy);
 			return err;
 		}
 
 		c->deterministic = true;
-		up_read(&g->deterministic_busy);
+		nvgpu_rwsem_up_read(&g->deterministic_busy);
 	}
 
 	/* an address space needs to have been bound at this point. */
@@ -1397,10 +1397,10 @@ clean_up:
 	memset(&c->gpfifo, 0, sizeof(struct gpfifo_desc));
 clean_up_idle:
 	if (c->deterministic) {
-		down_read(&g->deterministic_busy);
+		nvgpu_rwsem_down_read(&g->deterministic_busy);
 		gk20a_idle(g);
 		c->deterministic = false;
-		up_read(&g->deterministic_busy);
+		nvgpu_rwsem_up_read(&g->deterministic_busy);
 	}
 	nvgpu_err(g, "fail");
 	return err;
@@ -2661,7 +2661,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
 
 	/* Grab access to HW to deal with do_idle */
 	if (c->deterministic)
-		down_read(&g->deterministic_busy);
+		nvgpu_rwsem_down_read(&g->deterministic_busy);
 
 	trace_gk20a_channel_submit_gpfifo(g->name,
 					  c->chid,
@@ -2741,7 +2741,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
 
 	/* No hw access beyond this point */
 	if (c->deterministic)
-		up_read(&g->deterministic_busy);
+		nvgpu_rwsem_up_read(&g->deterministic_busy);
 
 	trace_gk20a_channel_submitted_gpfifo(g->name,
 					     c->chid,
@@ -2765,7 +2765,7 @@ clean_up:
 	gk20a_fence_put(pre_fence);
 	gk20a_fence_put(post_fence);
 	if (c->deterministic)
-		up_read(&g->deterministic_busy);
+		nvgpu_rwsem_up_read(&g->deterministic_busy);
 	else if (need_deferred_cleanup)
 		gk20a_idle(g);
 
@@ -2787,7 +2787,7 @@ void gk20a_channel_deterministic_idle(struct gk20a *g)
 	u32 chid;
 
 	/* Grab exclusive access to the hw to block new submits */
-	down_write(&g->deterministic_busy);
+	nvgpu_rwsem_down_write(&g->deterministic_busy);
 
 	for (chid = 0; chid < f->num_channels; chid++) {
 		struct channel_gk20a *ch = &f->channel[chid];
@@ -2845,7 +2845,7 @@ void gk20a_channel_deterministic_unidle(struct gk20a *g)
 	}
 
 	/* Release submits, new deterministic channels and frees */
-	up_write(&g->deterministic_busy);
+	nvgpu_rwsem_up_write(&g->deterministic_busy);
 }
 
 int gk20a_init_channel_support(struct gk20a *g, u32 chid)