author	Alex Waterman <alexw@nvidia.com>	2017-01-12 22:18:21 -0500
committer	Varun Colbert <vcolbert@nvidia.com>	2017-02-13 21:15:03 -0500
commit	e7a0c0ae8b6791c6b8ee30270ebdbe6e95fbbc71 (patch)
tree	0f62852593ad151cc716e258ece88fb5da30a3ae /drivers
parent	aa36d3786aeed6755b9744fed37aad000b582322 (diff)
gpu: nvgpu: Move from gk20a_ to nvgpu_ in semaphore code
Change the prefix in the semaphore code to 'nvgpu_' since this code is
global to all chips.

Bug 1799159

Change-Id: Ic1f3e13428882019e5d1f547acfe95271cc10da5
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1284628
Reviewed-by: Varun Colbert <vcolbert@nvidia.com>
Tested-by: Varun Colbert <vcolbert@nvidia.com>
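For reference, a minimal sketch of how channel code exercises the renamed API. The type and function names are the ones introduced by this patch (see include/nvgpu/semaphore.h below); the wrapper function, its name, and the simplified error handling are illustrative assumptions only, not part of the commit:

	/* Illustrative only - not part of this commit. */
	static int example_sema_sw_release(struct channel_gk20a *ch)
	{
		struct nvgpu_semaphore *s;

		s = nvgpu_semaphore_alloc(ch);	/* was gk20a_semaphore_alloc() */
		if (!s)
			return -ENOMEM;

		nvgpu_semaphore_incr(s);	/* reserve the next value (SW pre-fence / GPU INCR prep) */

		/* ... the GPU acquire is programmed via add_sema_cmd() ... */

		nvgpu_semaphore_release(s);	/* SW release unblocks the waiting channel */
		nvgpu_semaphore_put(s);		/* drop the kref taken at alloc */
		return 0;
	}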
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/nvgpu/common/semaphore.c           72
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c         6
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.h         2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c   58
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h    1
-rw-r--r--  drivers/gpu/nvgpu/gk20a/debug_gk20a.c           4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fence_gk20a.c          26
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fence_gk20a.h           6
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.h                 2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c             14
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.h              2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/sync_gk20a.c           26
-rw-r--r--  drivers/gpu/nvgpu/gk20a/sync_gk20a.h            6
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/semaphore.h    86
14 files changed, 155 insertions, 156 deletions
diff --git a/drivers/gpu/nvgpu/common/semaphore.c b/drivers/gpu/nvgpu/common/semaphore.c
index ea4910f1..4bf8695d 100644
--- a/drivers/gpu/nvgpu/common/semaphore.c
+++ b/drivers/gpu/nvgpu/common/semaphore.c
@@ -37,12 +37,12 @@
 /*
  * Return the sema_sea pointer.
  */
-struct gk20a_semaphore_sea *gk20a_semaphore_get_sea(struct gk20a *g)
+struct nvgpu_semaphore_sea *nvgpu_semaphore_get_sea(struct gk20a *g)
 {
 	return g->sema_sea;
 }
 
-static int __gk20a_semaphore_sea_grow(struct gk20a_semaphore_sea *sea)
+static int __nvgpu_semaphore_sea_grow(struct nvgpu_semaphore_sea *sea)
 {
 	int ret = 0;
 	struct gk20a *gk20a = sea->gk20a;
@@ -68,7 +68,7 @@ out:
  * Create the semaphore sea. Only create it once - subsequent calls to this will
  * return the originally created sea pointer.
  */
-struct gk20a_semaphore_sea *gk20a_semaphore_sea_create(struct gk20a *g)
+struct nvgpu_semaphore_sea *nvgpu_semaphore_sea_create(struct gk20a *g)
 {
 	if (g->sema_sea)
 		return g->sema_sea;
@@ -83,7 +83,7 @@ struct gk20a_semaphore_sea *gk20a_semaphore_sea_create(struct gk20a *g)
 	INIT_LIST_HEAD(&g->sema_sea->pool_list);
 	mutex_init(&g->sema_sea->sea_lock);
 
-	if (__gk20a_semaphore_sea_grow(g->sema_sea))
+	if (__nvgpu_semaphore_sea_grow(g->sema_sea))
 		goto cleanup;
 
 	gpu_sema_dbg("Created semaphore sea!");
@@ -111,10 +111,10 @@ static int __semaphore_bitmap_alloc(unsigned long *bitmap, unsigned long len)
 /*
  * Allocate a pool from the sea.
  */
-struct gk20a_semaphore_pool *gk20a_semaphore_pool_alloc(
-		struct gk20a_semaphore_sea *sea)
+struct nvgpu_semaphore_pool *nvgpu_semaphore_pool_alloc(
+		struct nvgpu_semaphore_sea *sea)
 {
-	struct gk20a_semaphore_pool *p;
+	struct nvgpu_semaphore_pool *p;
 	unsigned long page_idx;
 	int ret, err = 0;
 
@@ -159,7 +159,7 @@ fail:
  * Map a pool into the passed vm's address space. This handles both the fixed
  * global RO mapping and the non-fixed private RW mapping.
  */
-int gk20a_semaphore_pool_map(struct gk20a_semaphore_pool *p,
+int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p,
 			     struct vm_gk20a *vm)
 {
 	int ents, err = 0;
@@ -252,10 +252,10 @@ fail:
 /*
  * Unmap a semaphore_pool.
  */
-void gk20a_semaphore_pool_unmap(struct gk20a_semaphore_pool *p,
+void nvgpu_semaphore_pool_unmap(struct nvgpu_semaphore_pool *p,
 				struct vm_gk20a *vm)
 {
-	struct gk20a_semaphore_int *hw_sema;
+	struct nvgpu_semaphore_int *hw_sema;
 
 	kunmap(p->cpu_va);
 
@@ -291,12 +291,12 @@ void gk20a_semaphore_pool_unmap(struct gk20a_semaphore_pool *p,
  * Completely free a sempahore_pool. You should make sure this pool is not
  * mapped otherwise there's going to be a memory leak.
  */
-static void gk20a_semaphore_pool_free(struct kref *ref)
+static void nvgpu_semaphore_pool_free(struct kref *ref)
 {
-	struct gk20a_semaphore_pool *p =
-		container_of(ref, struct gk20a_semaphore_pool, ref);
-	struct gk20a_semaphore_sea *s = p->sema_sea;
-	struct gk20a_semaphore_int *hw_sema, *tmp;
+	struct nvgpu_semaphore_pool *p =
+		container_of(ref, struct nvgpu_semaphore_pool, ref);
+	struct nvgpu_semaphore_sea *s = p->sema_sea;
+	struct nvgpu_semaphore_int *hw_sema, *tmp;
 
 	WARN_ON(p->gpu_va || p->rw_sg_table || p->ro_sg_table);
 
@@ -313,21 +313,21 @@ static void gk20a_semaphore_pool_free(struct kref *ref)
 	kfree(p);
 }
 
-void gk20a_semaphore_pool_get(struct gk20a_semaphore_pool *p)
+void nvgpu_semaphore_pool_get(struct nvgpu_semaphore_pool *p)
 {
 	kref_get(&p->ref);
 }
 
-void gk20a_semaphore_pool_put(struct gk20a_semaphore_pool *p)
+void nvgpu_semaphore_pool_put(struct nvgpu_semaphore_pool *p)
 {
-	kref_put(&p->ref, gk20a_semaphore_pool_free);
+	kref_put(&p->ref, nvgpu_semaphore_pool_free);
 }
 
 /*
  * Get the address for a semaphore_pool - if global is true then return the
  * global RO address instead of the RW address owned by the semaphore's VM.
  */
-u64 __gk20a_semaphore_pool_gpu_va(struct gk20a_semaphore_pool *p, bool global)
+u64 __nvgpu_semaphore_pool_gpu_va(struct nvgpu_semaphore_pool *p, bool global)
 {
 	if (!global)
 		return p->gpu_va;
@@ -335,12 +335,12 @@ u64 __gk20a_semaphore_pool_gpu_va(struct gk20a_semaphore_pool *p, bool global)
 	return p->gpu_va_ro + (PAGE_SIZE * p->page_idx);
 }
 
-static int __gk20a_init_hw_sema(struct channel_gk20a *ch)
+static int __nvgpu_init_hw_sema(struct channel_gk20a *ch)
 {
 	int hw_sema_idx;
 	int ret = 0;
-	struct gk20a_semaphore_int *hw_sema;
-	struct gk20a_semaphore_pool *p = ch->vm->sema_pool;
+	struct nvgpu_semaphore_int *hw_sema;
+	struct nvgpu_semaphore_pool *p = ch->vm->sema_pool;
 
 	BUG_ON(!p);
 
@@ -354,7 +354,7 @@ static int __gk20a_init_hw_sema(struct channel_gk20a *ch)
 		goto fail;
 	}
 
-	hw_sema = kzalloc(sizeof(struct gk20a_semaphore_int), GFP_KERNEL);
+	hw_sema = kzalloc(sizeof(struct nvgpu_semaphore_int), GFP_KERNEL);
 	if (!hw_sema) {
 		ret = -ENOMEM;
 		goto fail_free_idx;
@@ -385,9 +385,9 @@ fail:
 /*
  * Free the channel used semaphore index
  */
-void gk20a_semaphore_free_hw_sema(struct channel_gk20a *ch)
+void nvgpu_semaphore_free_hw_sema(struct channel_gk20a *ch)
 {
-	struct gk20a_semaphore_pool *p = ch->vm->sema_pool;
+	struct nvgpu_semaphore_pool *p = ch->vm->sema_pool;
 
 	BUG_ON(!p);
 
@@ -409,13 +409,13 @@ void gk20a_semaphore_free_hw_sema(struct channel_gk20a *ch)
  * Since semaphores are ref-counted there's no explicit free for external code
  * to use. When the ref-count hits 0 the internal free will happen.
  */
-struct gk20a_semaphore *gk20a_semaphore_alloc(struct channel_gk20a *ch)
+struct nvgpu_semaphore *nvgpu_semaphore_alloc(struct channel_gk20a *ch)
 {
-	struct gk20a_semaphore *s;
+	struct nvgpu_semaphore *s;
 	int ret;
 
 	if (!ch->hw_sema) {
-		ret = __gk20a_init_hw_sema(ch);
+		ret = __nvgpu_init_hw_sema(ch);
 		if (ret)
 			return NULL;
 	}
@@ -432,29 +432,29 @@ struct gk20a_semaphore *gk20a_semaphore_alloc(struct channel_gk20a *ch)
 	 * Take a ref on the pool so that we can keep this pool alive for
 	 * as long as this semaphore is alive.
 	 */
-	gk20a_semaphore_pool_get(s->hw_sema->p);
+	nvgpu_semaphore_pool_get(s->hw_sema->p);
 
 	gpu_sema_dbg("Allocated semaphore (c=%d)", ch->hw_chid);
 
 	return s;
 }
 
-static void gk20a_semaphore_free(struct kref *ref)
+static void nvgpu_semaphore_free(struct kref *ref)
 {
-	struct gk20a_semaphore *s =
-		container_of(ref, struct gk20a_semaphore, ref);
+	struct nvgpu_semaphore *s =
+		container_of(ref, struct nvgpu_semaphore, ref);
 
-	gk20a_semaphore_pool_put(s->hw_sema->p);
+	nvgpu_semaphore_pool_put(s->hw_sema->p);
 
 	kfree(s);
 }
 
-void gk20a_semaphore_put(struct gk20a_semaphore *s)
+void nvgpu_semaphore_put(struct nvgpu_semaphore *s)
 {
-	kref_put(&s->ref, gk20a_semaphore_free);
+	kref_put(&s->ref, nvgpu_semaphore_free);
 }
 
-void gk20a_semaphore_get(struct gk20a_semaphore *s)
+void nvgpu_semaphore_get(struct nvgpu_semaphore *s)
 {
 	kref_get(&s->ref);
 }
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 3fa6bb25..b963ad96 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -515,7 +515,7 @@ void gk20a_channel_abort_clean_up(struct channel_gk20a *ch)
 		while (tmp_get != put) {
 			job = &ch->joblist.pre_alloc.jobs[tmp_get];
 			if (job->post_fence->semaphore) {
-				__gk20a_semaphore_release(
+				__nvgpu_semaphore_release(
 					job->post_fence->semaphore, true);
 				released_job_semaphore = true;
 			}
@@ -525,7 +525,7 @@ void gk20a_channel_abort_clean_up(struct channel_gk20a *ch)
 		list_for_each_entry_safe(job, n,
 				&ch->joblist.dynamic.jobs, list) {
 			if (job->post_fence->semaphore) {
-				__gk20a_semaphore_release(
+				__nvgpu_semaphore_release(
 					job->post_fence->semaphore, true);
 				released_job_semaphore = true;
 			}
@@ -1006,7 +1006,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 	 * as the semaphore pool might get freed after that point.
 	 */
 	if (ch->hw_sema)
-		gk20a_semaphore_free_hw_sema(ch);
+		nvgpu_semaphore_free_hw_sema(ch);
 
 	/*
 	 * When releasing the channel we unbind the VM - so release the ref.
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
index fce3f461..f940a271 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
@@ -172,7 +172,7 @@ struct channel_gk20a {
 	spinlock_t ref_actions_lock;
 #endif
 
-	struct gk20a_semaphore_int *hw_sema;
+	struct nvgpu_semaphore_int *hw_sema;
 
 	int hw_chid;
 	bool wdt_enabled;
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
index 0eba1c30..18971b09 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
@@ -388,7 +388,7 @@ struct gk20a_channel_semaphore {
 	struct channel_gk20a *c;
 
 	/* A semaphore pool owned by this channel. */
-	struct gk20a_semaphore_pool *pool;
+	struct nvgpu_semaphore_pool *pool;
 
 	/* A sync timeline that advances when gpu completes work. */
 	struct sync_timeline *timeline;
@@ -399,7 +399,7 @@ struct wait_fence_work {
 	struct sync_fence_waiter waiter;
 	struct sync_fence *fence;
 	struct channel_gk20a *ch;
-	struct gk20a_semaphore *sema;
+	struct nvgpu_semaphore *sema;
 	struct gk20a *g;
 	struct list_head entry;
 };
@@ -501,14 +501,14 @@ static void gk20a_channel_semaphore_launcher(
 		 "wait completed (%d) for fence %p '%s', triggering gpu work",
 		 err, fence, fence->name);
 	sync_fence_put(fence);
-	gk20a_semaphore_release(w->sema);
-	gk20a_semaphore_put(w->sema);
+	nvgpu_semaphore_release(w->sema);
+	nvgpu_semaphore_put(w->sema);
 	kfree(w);
 }
 #endif
 
 static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
-		struct gk20a_semaphore *s, struct priv_cmd_entry *cmd,
+		struct nvgpu_semaphore *s, struct priv_cmd_entry *cmd,
 		int cmd_size, bool acquire, bool wfi)
 {
 	int ch = c->hw_chid;
@@ -521,15 +521,15 @@ static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
 	 * RO for acquire (since we just need to read the mem) and RW for
 	 * release since we will need to write back to the semaphore memory.
 	 */
-	va = acquire ? gk20a_semaphore_gpu_ro_va(s) :
-		       gk20a_semaphore_gpu_rw_va(s);
+	va = acquire ? nvgpu_semaphore_gpu_ro_va(s) :
+		       nvgpu_semaphore_gpu_rw_va(s);
 
 	/*
 	 * If the op is not an acquire (so therefor a release) we should
 	 * incr the underlying sema next_value.
 	 */
 	if (!acquire)
-		gk20a_semaphore_incr(s);
+		nvgpu_semaphore_incr(s);
 
 	/* semaphore_a */
 	gk20a_mem_wr32(g, cmd->mem, off++, 0x20010004);
@@ -545,7 +545,7 @@ static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
 		gk20a_mem_wr32(g, cmd->mem, off++, 0x20010006);
 		/* payload */
 		gk20a_mem_wr32(g, cmd->mem, off++,
-			       gk20a_semaphore_get_value(s));
+			       nvgpu_semaphore_get_value(s));
 		/* semaphore_d */
 		gk20a_mem_wr32(g, cmd->mem, off++, 0x20010007);
 		/* operation: acq_geq, switch_en */
@@ -555,7 +555,7 @@ static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
 		gk20a_mem_wr32(g, cmd->mem, off++, 0x20010006);
 		/* payload */
 		gk20a_mem_wr32(g, cmd->mem, off++,
-			       gk20a_semaphore_get_value(s));
+			       nvgpu_semaphore_get_value(s));
 		/* semaphore_d */
 		gk20a_mem_wr32(g, cmd->mem, off++, 0x20010007);
 		/* operation: release, wfi */
@@ -570,13 +570,13 @@ static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
 	if (acquire)
 		gpu_sema_verbose_dbg("(A) c=%d ACQ_GE %-4u owner=%-3d"
 				     "va=0x%llx cmd_mem=0x%llx b=0x%llx off=%u",
-				     ch, gk20a_semaphore_get_value(s),
+				     ch, nvgpu_semaphore_get_value(s),
 				     s->hw_sema->ch->hw_chid, va, cmd->gva,
 				     cmd->mem->gpu_va, ob);
 	else
 		gpu_sema_verbose_dbg("(R) c=%d INCR %u (%u) va=0x%llx "
 				     "cmd_mem=0x%llx b=0x%llx off=%u",
-				     ch, gk20a_semaphore_get_value(s),
+				     ch, nvgpu_semaphore_get_value(s),
 				     readl(s->hw_sema->value), va, cmd->gva,
 				     cmd->mem->gpu_va, ob);
 }
@@ -596,7 +596,7 @@ static int gk20a_channel_semaphore_wait_syncpt(
 #ifdef CONFIG_SYNC
 /*
  * Attempt a fast path for waiting on a sync_fence. Basically if the passed
- * sync_fence is backed by a gk20a_semaphore then there's no reason to go
+ * sync_fence is backed by a nvgpu_semaphore then there's no reason to go
  * through the rigmarole of setting up a separate semaphore which waits on an
  * interrupt from the GPU and then triggers a worker thread to execute a SW
  * based semaphore release. Instead just have the GPU wait on the same semaphore
@@ -609,9 +609,9 @@ static int gk20a_channel_semaphore_wait_syncpt(
 static int __semaphore_wait_fd_fast_path(struct channel_gk20a *c,
 					 struct sync_fence *fence,
 					 struct priv_cmd_entry *wait_cmd,
-					 struct gk20a_semaphore **fp_sema)
+					 struct nvgpu_semaphore **fp_sema)
 {
-	struct gk20a_semaphore *sema;
+	struct nvgpu_semaphore *sema;
 	int err;
 
 	if (!gk20a_is_sema_backed_sync_fence(fence))
@@ -632,7 +632,7 @@ static int __semaphore_wait_fd_fast_path(struct channel_gk20a *c,
 	if (err)
 		return err;
 
-	gk20a_semaphore_get(sema);
+	nvgpu_semaphore_get(sema);
 	BUG_ON(!atomic_read(&sema->value));
 	add_sema_cmd(c->g, c, sema, wait_cmd, 8, true, false);
 
@@ -655,7 +655,7 @@ static int gk20a_channel_semaphore_wait_fd(
 		container_of(s, struct gk20a_channel_semaphore, ops);
 	struct channel_gk20a *c = sema->c;
 #ifdef CONFIG_SYNC
-	struct gk20a_semaphore *fp_sema;
+	struct nvgpu_semaphore *fp_sema;
 	struct sync_fence *sync_fence;
 	struct priv_cmd_entry *wait_cmd = entry;
 	struct wait_fence_work *w = NULL;
@@ -674,7 +674,7 @@ static int gk20a_channel_semaphore_wait_fd(
 					   &c->semaphore_wq,
 					   NULL, false, false);
 		if (err) {
-			gk20a_semaphore_put(fp_sema);
+			nvgpu_semaphore_put(fp_sema);
 			goto clean_up_priv_cmd;
 		}
 	} else
@@ -716,7 +716,7 @@ static int gk20a_channel_semaphore_wait_fd(
 	w->fence = sync_fence;
 	w->g = c->g;
 	w->ch = c;
-	w->sema = gk20a_semaphore_alloc(c);
+	w->sema = nvgpu_semaphore_alloc(c);
 	if (!w->sema) {
 		gk20a_err(dev_from_gk20a(c->g), "ran out of semaphores");
 		err = -ENOMEM;
@@ -724,8 +724,8 @@ static int gk20a_channel_semaphore_wait_fd(
 	}
 
 	/* worker takes one reference */
-	gk20a_semaphore_get(w->sema);
-	gk20a_semaphore_incr(w->sema);
+	nvgpu_semaphore_get(w->sema);
+	nvgpu_semaphore_incr(w->sema);
 
 	/* GPU unblocked when the semaphore value increments. */
 	add_sema_cmd(c->g, c, w->sema, wait_cmd, 8, true, false);
@@ -747,12 +747,12 @@ static int gk20a_channel_semaphore_wait_fd(
 	 * If the sync_fence has already signaled then the above async_wait
 	 * will never trigger. This causes the semaphore release op to never
 	 * happen which, in turn, hangs the GPU. That's bad. So let's just
-	 * do the gk20a_semaphore_release() right now.
+	 * do the nvgpu_semaphore_release() right now.
 	 */
 	if (ret == 1) {
 		sync_fence_put(sync_fence);
-		gk20a_semaphore_release(w->sema);
-		gk20a_semaphore_put(w->sema);
+		nvgpu_semaphore_release(w->sema);
+		nvgpu_semaphore_put(w->sema);
 	}
 
 skip_slow_path:
@@ -763,8 +763,8 @@ clean_up_sema:
 	 * Release the refs to the semaphore, including
 	 * the one for the worker since it will never run.
 	 */
-	gk20a_semaphore_put(w->sema);
-	gk20a_semaphore_put(w->sema);
+	nvgpu_semaphore_put(w->sema);
+	nvgpu_semaphore_put(w->sema);
 clean_up_worker:
 	kfree(w);
 clean_up_priv_cmd:
@@ -790,10 +790,10 @@ static int __gk20a_channel_semaphore_incr(
 	struct gk20a_channel_semaphore *sp =
 		container_of(s, struct gk20a_channel_semaphore, ops);
 	struct channel_gk20a *c = sp->c;
-	struct gk20a_semaphore *semaphore;
+	struct nvgpu_semaphore *semaphore;
 	int err = 0;
 
-	semaphore = gk20a_semaphore_alloc(c);
+	semaphore = nvgpu_semaphore_alloc(c);
 	if (!semaphore) {
 		gk20a_err(dev_from_gk20a(c->g),
 			  "ran out of semaphores");
@@ -824,7 +824,7 @@ static int __gk20a_channel_semaphore_incr(
 clean_up_priv_cmd:
 	gk20a_free_priv_cmdbuf(c, incr_cmd);
 clean_up_sema:
-	gk20a_semaphore_put(semaphore);
+	nvgpu_semaphore_put(semaphore);
 	return err;
 }
 
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h
index d9ad12ad..c3136023 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h
@@ -23,7 +23,6 @@
 struct gk20a_channel_sync;
 struct priv_cmd_entry;
 struct channel_gk20a;
-struct gk20a_semaphore;
 struct gk20a_fence;
 struct gk20a;
 
diff --git a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
index 83fdc05d..d20229b3 100644
--- a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
@@ -109,7 +109,7 @@ static void gk20a_debug_show_channel(struct gk20a *g,
 	u32 syncpointa, syncpointb;
 	u32 *inst_mem;
 	struct channel_gk20a *c = g->fifo.channel + hw_chid;
-	struct gk20a_semaphore_int *hw_sema = NULL;
+	struct nvgpu_semaphore_int *hw_sema = NULL;
 
 	if (c->hw_sema)
 		hw_sema = c->hw_sema;
@@ -154,7 +154,7 @@ static void gk20a_debug_show_channel(struct gk20a *g,
 			  "next_val: 0x%08x addr: 0x%010llx\n",
 			  readl(hw_sema->value),
 			  atomic_read(&hw_sema->next_value),
			  gk20a_hw_sema_addr(hw_sema));
 
 #ifdef CONFIG_TEGRA_GK20A
 	if ((pbdma_syncpointb_op_v(syncpointb) == pbdma_syncpointb_op_wait_v())
diff --git a/drivers/gpu/nvgpu/gk20a/fence_gk20a.c b/drivers/gpu/nvgpu/gk20a/fence_gk20a.c
index 6bd59067..115cd2b5 100644
--- a/drivers/gpu/nvgpu/gk20a/fence_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fence_gk20a.c
@@ -47,7 +47,7 @@ static void gk20a_fence_free(struct kref *ref)
 		sync_fence_put(f->sync_fence);
 #endif
 	if (f->semaphore)
-		gk20a_semaphore_put(f->semaphore);
+		nvgpu_semaphore_put(f->semaphore);
 
 	if (f->allocator) {
 		if (nvgpu_alloc_initialized(f->allocator))
@@ -193,39 +193,39 @@ void gk20a_init_fence(struct gk20a_fence *f,
 
 /* Fences that are backed by GPU semaphores: */
 
-static int gk20a_semaphore_fence_wait(struct gk20a_fence *f, long timeout)
+static int nvgpu_semaphore_fence_wait(struct gk20a_fence *f, long timeout)
 {
 	long remain;
 
-	if (!gk20a_semaphore_is_acquired(f->semaphore))
+	if (!nvgpu_semaphore_is_acquired(f->semaphore))
 		return 0;
 
 	remain = wait_event_interruptible_timeout(
 		*f->semaphore_wq,
-		!gk20a_semaphore_is_acquired(f->semaphore),
+		!nvgpu_semaphore_is_acquired(f->semaphore),
 		timeout);
-	if (remain == 0 && gk20a_semaphore_is_acquired(f->semaphore))
+	if (remain == 0 && nvgpu_semaphore_is_acquired(f->semaphore))
 		return -ETIMEDOUT;
 	else if (remain < 0)
 		return remain;
 	return 0;
 }
 
-static bool gk20a_semaphore_fence_is_expired(struct gk20a_fence *f)
+static bool nvgpu_semaphore_fence_is_expired(struct gk20a_fence *f)
 {
-	return !gk20a_semaphore_is_acquired(f->semaphore);
+	return !nvgpu_semaphore_is_acquired(f->semaphore);
 }
 
-static const struct gk20a_fence_ops gk20a_semaphore_fence_ops = {
-	.wait = &gk20a_semaphore_fence_wait,
-	.is_expired = &gk20a_semaphore_fence_is_expired,
+static const struct gk20a_fence_ops nvgpu_semaphore_fence_ops = {
+	.wait = &nvgpu_semaphore_fence_wait,
+	.is_expired = &nvgpu_semaphore_fence_is_expired,
 };
 
 /* This function takes ownership of the semaphore */
 int gk20a_fence_from_semaphore(
 		struct gk20a_fence *fence_out,
 		struct sync_timeline *timeline,
-		struct gk20a_semaphore *semaphore,
+		struct nvgpu_semaphore *semaphore,
 		wait_queue_head_t *semaphore_wq,
 		struct sync_fence *dependency,
 		bool wfi, bool need_sync_fence)
@@ -237,13 +237,13 @@ int gk20a_fence_from_semaphore(
 	if (need_sync_fence) {
 		sync_fence = gk20a_sync_fence_create(timeline, semaphore,
 						     dependency, "f-gk20a-0x%04x",
-						     gk20a_semaphore_gpu_ro_va(semaphore));
+						     nvgpu_semaphore_gpu_ro_va(semaphore));
 		if (!sync_fence)
 			return -1;
 	}
 #endif
 
-	gk20a_init_fence(f, &gk20a_semaphore_fence_ops, sync_fence, wfi);
+	gk20a_init_fence(f, &nvgpu_semaphore_fence_ops, sync_fence, wfi);
 	if (!f) {
 #ifdef CONFIG_SYNC
 		if (sync_fence)
diff --git a/drivers/gpu/nvgpu/gk20a/fence_gk20a.h b/drivers/gpu/nvgpu/gk20a/fence_gk20a.h
index 5abf96e5..b4283f58 100644
--- a/drivers/gpu/nvgpu/gk20a/fence_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fence_gk20a.h
@@ -24,7 +24,7 @@
 struct platform_device;
 struct sync_timeline;
 struct sync_fence;
-struct gk20a_semaphore;
+struct nvgpu_semaphore;
 struct channel_gk20a;
 
 struct gk20a_fence_ops;
@@ -38,7 +38,7 @@ struct gk20a_fence {
 	const struct gk20a_fence_ops *ops;
 
 	/* Valid for fences created from semaphores: */
-	struct gk20a_semaphore *semaphore;
+	struct nvgpu_semaphore *semaphore;
 	wait_queue_head_t *semaphore_wq;
 
 	/* Valid for fences created from syncpoints: */
@@ -54,7 +54,7 @@ struct gk20a_fence {
 int gk20a_fence_from_semaphore(
 		struct gk20a_fence *fence_out,
 		struct sync_timeline *timeline,
-		struct gk20a_semaphore *semaphore,
+		struct nvgpu_semaphore *semaphore,
 		wait_queue_head_t *semaphore_wq,
 		struct sync_fence *dependency,
 		bool wfi, bool need_sync_fence);
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 39be4e9c..8d6d36fc 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -922,7 +922,7 @@ struct gk20a {
 	/*
 	 * A group of semaphore pools. One for each channel.
 	 */
-	struct gk20a_semaphore_sea *sema_sea;
+	struct nvgpu_semaphore_sea *sema_sea;
 
 	/* List of pending SW semaphore waits. */
 	struct list_head pending_sema_waits;
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index cafb1233..c62d1f6c 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -4096,8 +4096,8 @@ static void gk20a_vm_remove_support_nofree(struct vm_gk20a *vm)
 	 */
 	if (!gk20a_platform_has_syncpoints(gk20a_from_vm(vm)->dev)) {
 		if (vm->sema_pool) {
-			gk20a_semaphore_pool_unmap(vm->sema_pool, vm);
-			gk20a_semaphore_pool_put(vm->sema_pool);
+			nvgpu_semaphore_pool_unmap(vm->sema_pool, vm);
+			nvgpu_semaphore_pool_put(vm->sema_pool);
 		}
 	}
 
@@ -4180,7 +4180,7 @@ const struct gk20a_mmu_level gk20a_mm_levels_128k[] = {
 */
 static int gk20a_init_sema_pool(struct vm_gk20a *vm)
 {
-	struct gk20a_semaphore_sea *sema_sea;
+	struct nvgpu_semaphore_sea *sema_sea;
 	struct mm_gk20a *mm = vm->mm;
 	struct gk20a *g = mm->g;
 	int err;
@@ -4194,11 +4194,11 @@ static int gk20a_init_sema_pool(struct vm_gk20a *vm)
 	if (vm->sema_pool)
 		return 0;
 
-	sema_sea = gk20a_semaphore_sea_create(g);
+	sema_sea = nvgpu_semaphore_sea_create(g);
 	if (!sema_sea)
 		return -ENOMEM;
 
-	vm->sema_pool = gk20a_semaphore_pool_alloc(sema_sea);
+	vm->sema_pool = nvgpu_semaphore_pool_alloc(sema_sea);
 	if (!vm->sema_pool)
 		return -ENOMEM;
 
@@ -4220,9 +4220,9 @@ static int gk20a_init_sema_pool(struct vm_gk20a *vm)
 		return -ENOMEM;
 	}
 
-	err = gk20a_semaphore_pool_map(vm->sema_pool, vm);
+	err = nvgpu_semaphore_pool_map(vm->sema_pool, vm);
 	if (err) {
-		gk20a_semaphore_pool_unmap(vm->sema_pool, vm);
+		nvgpu_semaphore_pool_unmap(vm->sema_pool, vm);
 		nvgpu_free(vm->vma[gmmu_page_size_small],
 			   vm->sema_pool->gpu_va);
 		return err;
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index d39ca2d0..294dc628 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -303,7 +303,7 @@ struct vm_gk20a {
 	/*
 	 * Each address space needs to have a semaphore pool.
 	 */
-	struct gk20a_semaphore_pool *sema_pool;
+	struct nvgpu_semaphore_pool *sema_pool;
 };
 
 struct gk20a;
diff --git a/drivers/gpu/nvgpu/gk20a/sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/sync_gk20a.c
index e7bacac8..edfe3deb 100644
--- a/drivers/gpu/nvgpu/gk20a/sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/sync_gk20a.c
@@ -45,7 +45,7 @@ struct gk20a_sync_timeline {
 struct gk20a_sync_pt {
 	struct kref refcount;
 	u32 thresh;
-	struct gk20a_semaphore *sema;
+	struct nvgpu_semaphore *sema;
 	struct gk20a_sync_timeline *obj;
 	struct sync_fence *dep;
 	ktime_t dep_timestamp;
@@ -110,7 +110,7 @@ int gk20a_is_sema_backed_sync_fence(struct sync_fence *fence)
 	return 0;
 }
 
-struct gk20a_semaphore *gk20a_sync_fence_get_sema(struct sync_fence *f)
+struct nvgpu_semaphore *gk20a_sync_fence_get_sema(struct sync_fence *f)
 {
 	struct sync_pt *spt;
 	struct gk20a_sync_pt_inst *pti;
@@ -208,13 +208,13 @@ static void gk20a_sync_pt_free_shared(struct kref *ref)
 	if (pt->dep)
 		sync_fence_put(pt->dep);
 	if (pt->sema)
-		gk20a_semaphore_put(pt->sema);
+		nvgpu_semaphore_put(pt->sema);
 	kfree(pt);
 }
 
 static struct gk20a_sync_pt *gk20a_sync_pt_create_shared(
 		struct gk20a_sync_timeline *obj,
-		struct gk20a_semaphore *sema,
+		struct nvgpu_semaphore *sema,
 		struct sync_fence *dependency)
 {
 	struct gk20a_sync_pt *shared;
@@ -244,14 +244,14 @@ static struct gk20a_sync_pt *gk20a_sync_pt_create_shared(
 
 	spin_lock_init(&shared->lock);
 
-	gk20a_semaphore_get(sema);
+	nvgpu_semaphore_get(sema);
 
 	return shared;
 }
 
 static struct sync_pt *gk20a_sync_pt_create_inst(
 		struct gk20a_sync_timeline *obj,
-		struct gk20a_semaphore *sema,
+		struct nvgpu_semaphore *sema,
 		struct sync_fence *dependency)
 {
 	struct gk20a_sync_pt_inst *pti;
@@ -309,7 +309,7 @@ static int gk20a_sync_pt_has_signaled(struct sync_pt *sync_pt)
 		goto done;
 
 	/* Acquired == not realeased yet == active == not signaled. */
-	signaled = !gk20a_semaphore_is_acquired(pt->sema);
+	signaled = !nvgpu_semaphore_is_acquired(pt->sema);
 
 	if (signaled) {
 		/* Update min if necessary. */
@@ -341,7 +341,7 @@ static int gk20a_sync_pt_has_signaled(struct sync_pt *sync_pt)
 		}
 
 		/* Release the semaphore to the pool. */
-		gk20a_semaphore_put(pt->sema);
+		nvgpu_semaphore_put(pt->sema);
 		pt->sema = NULL;
 	}
 done:
@@ -410,12 +410,12 @@ static void gk20a_sync_timeline_value_str(struct sync_timeline *timeline,
 static void gk20a_sync_pt_value_str_for_sema(struct gk20a_sync_pt *pt,
 					     char *str, int size)
 {
-	struct gk20a_semaphore *s = pt->sema;
+	struct nvgpu_semaphore *s = pt->sema;
 
 	snprintf(str, size, "S: c=%d [v=%u,r_v=%u]",
 		 s->hw_sema->ch->hw_chid,
-		 gk20a_semaphore_get_value(s),
-		 gk20a_semaphore_read(s));
+		 nvgpu_semaphore_get_value(s),
+		 nvgpu_semaphore_read(s));
 }
 
 static void gk20a_sync_pt_value_str(struct sync_pt *sync_pt, char *str,
@@ -458,7 +458,7 @@ static int gk20a_sync_fill_driver_data(struct sync_pt *sync_pt,
 }
 
 static const struct sync_timeline_ops gk20a_sync_timeline_ops = {
-	.driver_name = "gk20a_semaphore",
+	.driver_name = "nvgpu_semaphore",
 	.dup = gk20a_sync_pt_dup_inst,
 	.has_signaled = gk20a_sync_pt_has_signaled,
 	.compare = gk20a_sync_pt_compare,
@@ -508,7 +508,7 @@ struct sync_timeline *gk20a_sync_timeline_create(
 }
 
 struct sync_fence *gk20a_sync_fence_create(struct sync_timeline *obj,
-		struct gk20a_semaphore *sema,
+		struct nvgpu_semaphore *sema,
 		struct sync_fence *dependency,
 		const char *fmt, ...)
 {
diff --git a/drivers/gpu/nvgpu/gk20a/sync_gk20a.h b/drivers/gpu/nvgpu/gk20a/sync_gk20a.h
index 88bf07cd..dcced5c8 100644
--- a/drivers/gpu/nvgpu/gk20a/sync_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/sync_gk20a.h
@@ -24,18 +24,18 @@
 struct sync_timeline;
 struct sync_fence;
 struct sync_pt;
-struct gk20a_semaphore;
+struct nvgpu_semaphore;
 struct fence;
 
 int gk20a_is_sema_backed_sync_fence(struct sync_fence *fence);
-struct gk20a_semaphore *gk20a_sync_fence_get_sema(struct sync_fence *f);
+struct nvgpu_semaphore *gk20a_sync_fence_get_sema(struct sync_fence *f);
 
 #ifdef CONFIG_SYNC
 struct sync_timeline *gk20a_sync_timeline_create(const char *fmt, ...);
 void gk20a_sync_timeline_destroy(struct sync_timeline *);
 void gk20a_sync_timeline_signal(struct sync_timeline *);
 struct sync_fence *gk20a_sync_fence_create(struct sync_timeline *,
-		struct gk20a_semaphore *,
+		struct nvgpu_semaphore *,
 		struct sync_fence *dependency,
 		const char *fmt, ...);
 struct sync_fence *gk20a_sync_fence_fdget(int fd);
diff --git a/drivers/gpu/nvgpu/include/nvgpu/semaphore.h b/drivers/gpu/nvgpu/include/nvgpu/semaphore.h
index 07a27584..cc4921d3 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/semaphore.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/semaphore.h
@@ -37,19 +37,19 @@
 #define SEMAPHORE_SIZE			16
 #define SEMAPHORE_SEA_GROWTH_RATE	32
 
-struct gk20a_semaphore_sea;
+struct nvgpu_semaphore_sea;
 
 /*
  * Underlying semaphore data structure. This semaphore can be shared amongst
  * other semaphore instances.
  */
-struct gk20a_semaphore_int {
+struct nvgpu_semaphore_int {
 	int idx;			/* Semaphore index. */
 	u32 offset;			/* Offset into the pool. */
 	atomic_t next_value;		/* Next available value. */
 	u32 *value;			/* Current value (access w/ readl()). */
 	u32 nr_incrs;			/* Number of increments programmed. */
-	struct gk20a_semaphore_pool *p;	/* Pool that owns this sema. */
+	struct nvgpu_semaphore_pool *p;	/* Pool that owns this sema. */
 	struct channel_gk20a *ch;	/* Channel that owns this sema. */
 	struct list_head hw_sema_list;	/* List of HW semaphores. */
 };
@@ -59,8 +59,8 @@ struct gk20a_semaphore_int {
  * pointer to a real semaphore and a value to wait for. This allows one physical
  * semaphore to be shared among an essentially infinite number of submits.
  */
-struct gk20a_semaphore {
-	struct gk20a_semaphore_int *hw_sema;
+struct nvgpu_semaphore {
+	struct nvgpu_semaphore_int *hw_sema;
 
 	atomic_t value;
 	int incremented;
@@ -71,7 +71,7 @@ struct gk20a_semaphore {
 /*
  * A semaphore pool. Each address space will own exactly one of these.
  */
-struct gk20a_semaphore_pool {
+struct nvgpu_semaphore_pool {
 	struct page *page;			/* This pool's page of memory */
 	struct list_head pool_list_entry;	/* Node for list of pools. */
 	void *cpu_va;				/* CPU access to the pool. */
@@ -82,7 +82,7 @@ struct gk20a_semaphore_pool {
 	struct list_head hw_semas;		/* List of HW semas. */
 	DECLARE_BITMAP(semas_alloced, PAGE_SIZE / SEMAPHORE_SIZE);
 
-	struct gk20a_semaphore_sea *sema_sea;	/* Sea that owns this pool. */
+	struct nvgpu_semaphore_sea *sema_sea;	/* Sea that owns this pool. */
 
 	struct mutex pool_lock;
 
@@ -114,7 +114,7 @@ struct gk20a_semaphore_pool {
  * pool. Channels then allocate regular semaphores - basically just a value that
  * signifies when a particular job is done.
  */
-struct gk20a_semaphore_sea {
+struct nvgpu_semaphore_sea {
 	struct list_head pool_list;		/* List of pools in this sea. */
 	struct gk20a *gk20a;
 
@@ -149,33 +149,33 @@ struct gk20a_semaphore_sea {
 /*
  * Semaphore sea functions.
  */
-struct gk20a_semaphore_sea *gk20a_semaphore_sea_create(struct gk20a *gk20a);
-int gk20a_semaphore_sea_map(struct gk20a_semaphore_pool *sea,
+struct nvgpu_semaphore_sea *nvgpu_semaphore_sea_create(struct gk20a *gk20a);
+int nvgpu_semaphore_sea_map(struct nvgpu_semaphore_pool *sea,
 			    struct vm_gk20a *vm);
-void gk20a_semaphore_sea_unmap(struct gk20a_semaphore_pool *sea,
+void nvgpu_semaphore_sea_unmap(struct nvgpu_semaphore_pool *sea,
 			       struct vm_gk20a *vm);
-struct gk20a_semaphore_sea *gk20a_semaphore_get_sea(struct gk20a *g);
+struct nvgpu_semaphore_sea *nvgpu_semaphore_get_sea(struct gk20a *g);
 
 /*
  * Semaphore pool functions.
 */
-struct gk20a_semaphore_pool *gk20a_semaphore_pool_alloc(
-		struct gk20a_semaphore_sea *sea);
-int gk20a_semaphore_pool_map(struct gk20a_semaphore_pool *pool,
+struct nvgpu_semaphore_pool *nvgpu_semaphore_pool_alloc(
+		struct nvgpu_semaphore_sea *sea);
+int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *pool,
 			     struct vm_gk20a *vm);
-void gk20a_semaphore_pool_unmap(struct gk20a_semaphore_pool *pool,
+void nvgpu_semaphore_pool_unmap(struct nvgpu_semaphore_pool *pool,
 				struct vm_gk20a *vm);
-u64 __gk20a_semaphore_pool_gpu_va(struct gk20a_semaphore_pool *p, bool global);
-void gk20a_semaphore_pool_get(struct gk20a_semaphore_pool *p);
-void gk20a_semaphore_pool_put(struct gk20a_semaphore_pool *p);
+u64 __nvgpu_semaphore_pool_gpu_va(struct nvgpu_semaphore_pool *p, bool global);
+void nvgpu_semaphore_pool_get(struct nvgpu_semaphore_pool *p);
+void nvgpu_semaphore_pool_put(struct nvgpu_semaphore_pool *p);
 
 /*
  * Semaphore functions.
 */
-struct gk20a_semaphore *gk20a_semaphore_alloc(struct channel_gk20a *ch);
-void gk20a_semaphore_put(struct gk20a_semaphore *s);
-void gk20a_semaphore_get(struct gk20a_semaphore *s);
-void gk20a_semaphore_free_hw_sema(struct channel_gk20a *ch);
+struct nvgpu_semaphore *nvgpu_semaphore_alloc(struct channel_gk20a *ch);
+void nvgpu_semaphore_put(struct nvgpu_semaphore *s);
+void nvgpu_semaphore_get(struct nvgpu_semaphore *s);
+void nvgpu_semaphore_free_hw_sema(struct channel_gk20a *ch);
 
 /*
  * Return the address of a specific semaphore.
@@ -183,9 +183,9 @@ void gk20a_semaphore_free_hw_sema(struct channel_gk20a *ch);
 * Don't call this on a semaphore you don't own - the VA returned will make no
 * sense in your specific channel's VM.
 */
-static inline u64 gk20a_semaphore_gpu_rw_va(struct gk20a_semaphore *s)
+static inline u64 nvgpu_semaphore_gpu_rw_va(struct nvgpu_semaphore *s)
 {
-	return __gk20a_semaphore_pool_gpu_va(s->hw_sema->p, false) +
+	return __nvgpu_semaphore_pool_gpu_va(s->hw_sema->p, false) +
 		s->hw_sema->offset;
 }
 
@@ -193,22 +193,22 @@ static inline u64 gk20a_semaphore_gpu_rw_va(struct gk20a_semaphore *s)
 * Get the global RO address for the semaphore. Can be called on any semaphore
 * regardless of whether you own it.
 */
-static inline u64 gk20a_semaphore_gpu_ro_va(struct gk20a_semaphore *s)
+static inline u64 nvgpu_semaphore_gpu_ro_va(struct nvgpu_semaphore *s)
 {
-	return __gk20a_semaphore_pool_gpu_va(s->hw_sema->p, true) +
+	return __nvgpu_semaphore_pool_gpu_va(s->hw_sema->p, true) +
 		s->hw_sema->offset;
 }
 
-static inline u64 gk20a_hw_sema_addr(struct gk20a_semaphore_int *hw_sema)
+static inline u64 nvgpu_hw_sema_addr(struct nvgpu_semaphore_int *hw_sema)
 {
-	return __gk20a_semaphore_pool_gpu_va(hw_sema->p, true) +
+	return __nvgpu_semaphore_pool_gpu_va(hw_sema->p, true) +
 		hw_sema->offset;
 }
 
 /*
  * TODO: handle wrap around... Hmm, how to do this?
 */
-static inline bool gk20a_semaphore_is_released(struct gk20a_semaphore *s)
+static inline bool nvgpu_semaphore_is_released(struct nvgpu_semaphore *s)
 {
 	u32 sema_val = readl(s->hw_sema->value);
 
@@ -220,25 +220,25 @@ static inline bool gk20a_semaphore_is_released(struct gk20a_semaphore *s)
 	return (int)sema_val >= atomic_read(&s->value);
 }
 
-static inline bool gk20a_semaphore_is_acquired(struct gk20a_semaphore *s)
+static inline bool nvgpu_semaphore_is_acquired(struct nvgpu_semaphore *s)
 {
-	return !gk20a_semaphore_is_released(s);
+	return !nvgpu_semaphore_is_released(s);
 }
 
 /*
  * Read the underlying value from a semaphore.
 */
-static inline u32 gk20a_semaphore_read(struct gk20a_semaphore *s)
+static inline u32 nvgpu_semaphore_read(struct nvgpu_semaphore *s)
 {
 	return readl(s->hw_sema->value);
 }
 
-static inline u32 gk20a_semaphore_get_value(struct gk20a_semaphore *s)
+static inline u32 nvgpu_semaphore_get_value(struct nvgpu_semaphore *s)
 {
 	return (u32)atomic_read(&s->value);
 }
 
-static inline u32 gk20a_semaphore_next_value(struct gk20a_semaphore *s)
+static inline u32 nvgpu_semaphore_next_value(struct nvgpu_semaphore *s)
 {
 	return (u32)atomic_read(&s->hw_sema->next_value);
 }
@@ -247,11 +247,11 @@ static inline u32 gk20a_semaphore_next_value(struct gk20a_semaphore *s)
 * If @force is set then this will not wait for the underlying semaphore to
 * catch up to the passed semaphore.
 */
-static inline void __gk20a_semaphore_release(struct gk20a_semaphore *s,
+static inline void __nvgpu_semaphore_release(struct nvgpu_semaphore *s,
 					     bool force)
 {
 	u32 current_val;
-	u32 val = gk20a_semaphore_get_value(s);
+	u32 val = nvgpu_semaphore_get_value(s);
 	int attempts = 0;
 
 	/*
@@ -260,7 +260,7 @@ static inline void __gk20a_semaphore_release(struct gk20a_semaphore *s,
 	 *
 	 * TODO: tune the wait a little better.
 	 */
-	while ((current_val = gk20a_semaphore_read(s)) < (val - 1)) {
+	while ((current_val = nvgpu_semaphore_read(s)) < (val - 1)) {
 		if (force)
 			break;
 		msleep(100);
@@ -284,21 +284,21 @@ static inline void __gk20a_semaphore_release(struct gk20a_semaphore *s,
 			     s->hw_sema->ch->hw_chid, val);
 }
 
-static inline void gk20a_semaphore_release(struct gk20a_semaphore *s)
+static inline void nvgpu_semaphore_release(struct nvgpu_semaphore *s)
 {
-	__gk20a_semaphore_release(s, false);
+	__nvgpu_semaphore_release(s, false);
 }
 
 /*
 * Configure a software based increment on this semaphore. This is useful for
 * when we want the GPU to wait on a SW event before processing a channel.
 * Another way to describe this is when the GPU needs to wait on a SW pre-fence.
- * The pre-fence signals SW which in turn calls gk20a_semaphore_release() which
+ * The pre-fence signals SW which in turn calls nvgpu_semaphore_release() which
 * then allows the GPU to continue.
 *
 * Also used to prep a semaphore for an INCR by the GPU.
*/
-static inline void gk20a_semaphore_incr(struct gk20a_semaphore *s)
+static inline void nvgpu_semaphore_incr(struct nvgpu_semaphore *s)
 {
 	BUG_ON(s->incremented);
 
@@ -307,6 +307,6 @@ static inline void gk20a_semaphore_incr(struct gk20a_semaphore *s)
 
 	gpu_sema_verbose_dbg("INCR sema for c=%d (%u)",
 			     s->hw_sema->ch->hw_chid,
-			     gk20a_semaphore_next_value(s));
+			     nvgpu_semaphore_next_value(s));
 }
 #endif