diff options
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c | 29 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/gk20a.h | 2 |
2 files changed, 19 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c index d9dfb133..fc5862e1 100644 --- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c | |||
@@ -399,7 +399,14 @@ struct wait_fence_work { | |||
399 | struct channel_gk20a *ch; | 399 | struct channel_gk20a *ch; |
400 | struct nvgpu_semaphore *sema; | 400 | struct nvgpu_semaphore *sema; |
401 | struct gk20a *g; | 401 | struct gk20a *g; |
402 | struct list_head entry; | 402 | struct nvgpu_list_node entry; |
403 | }; | ||
404 | |||
405 | static inline struct wait_fence_work * | ||
406 | wait_fence_work_from_entry(struct nvgpu_list_node *node) | ||
407 | { | ||
408 | return (struct wait_fence_work *) | ||
409 | ((uintptr_t)node - offsetof(struct wait_fence_work, entry)); | ||
403 | }; | 410 | }; |
404 | 411 | ||
405 | /* | 412 | /* |
@@ -413,7 +420,7 @@ static void gk20a_add_pending_sema_wait(struct gk20a *g, | |||
413 | struct wait_fence_work *work) | 420 | struct wait_fence_work *work) |
414 | { | 421 | { |
415 | nvgpu_raw_spinlock_acquire(&g->pending_sema_waits_lock); | 422 | nvgpu_raw_spinlock_acquire(&g->pending_sema_waits_lock); |
416 | list_add(&work->entry, &g->pending_sema_waits); | 423 | nvgpu_list_add(&work->entry, &g->pending_sema_waits); |
417 | nvgpu_raw_spinlock_release(&g->pending_sema_waits_lock); | 424 | nvgpu_raw_spinlock_release(&g->pending_sema_waits_lock); |
418 | } | 425 | } |
419 | 426 | ||
@@ -422,10 +429,10 @@ static void gk20a_add_pending_sema_wait(struct gk20a *g, | |||
422 | * then delete the entire pending list. | 429 | * then delete the entire pending list. |
423 | */ | 430 | */ |
424 | static void gk20a_start_sema_wait_cancel(struct gk20a *g, | 431 | static void gk20a_start_sema_wait_cancel(struct gk20a *g, |
425 | struct list_head *list) | 432 | struct nvgpu_list_node *list) |
426 | { | 433 | { |
427 | nvgpu_raw_spinlock_acquire(&g->pending_sema_waits_lock); | 434 | nvgpu_raw_spinlock_acquire(&g->pending_sema_waits_lock); |
428 | list_replace_init(&g->pending_sema_waits, list); | 435 | nvgpu_list_replace_init(&g->pending_sema_waits, list); |
429 | nvgpu_raw_spinlock_release(&g->pending_sema_waits_lock); | 436 | nvgpu_raw_spinlock_release(&g->pending_sema_waits_lock); |
430 | } | 437 | } |
431 | 438 | ||
@@ -448,18 +455,18 @@ static void gk20a_start_sema_wait_cancel(struct gk20a *g, | |||
448 | void gk20a_channel_cancel_pending_sema_waits(struct gk20a *g) | 455 | void gk20a_channel_cancel_pending_sema_waits(struct gk20a *g) |
449 | { | 456 | { |
450 | struct wait_fence_work *work; | 457 | struct wait_fence_work *work; |
451 | struct list_head local_pending_sema_waits; | 458 | struct nvgpu_list_node local_pending_sema_waits; |
452 | 459 | ||
453 | gk20a_start_sema_wait_cancel(g, &local_pending_sema_waits); | 460 | gk20a_start_sema_wait_cancel(g, &local_pending_sema_waits); |
454 | 461 | ||
455 | while (!list_empty(&local_pending_sema_waits)) { | 462 | while (!nvgpu_list_empty(&local_pending_sema_waits)) { |
456 | int ret; | 463 | int ret; |
457 | 464 | ||
458 | work = list_first_entry(&local_pending_sema_waits, | 465 | work = nvgpu_list_first_entry(&local_pending_sema_waits, |
459 | struct wait_fence_work, | 466 | wait_fence_work, |
460 | entry); | 467 | entry); |
461 | 468 | ||
462 | list_del_init(&work->entry); | 469 | nvgpu_list_del(&work->entry); |
463 | 470 | ||
464 | /* | 471 | /* |
465 | * Only nvgpu_kfree() work if the cancel is successful. | 472 | * Only nvgpu_kfree() work if the cancel is successful. |
@@ -486,8 +493,8 @@ static void gk20a_channel_semaphore_launcher( | |||
486 | * otherwise it's possible that the deterministic submit path suffers. | 493 | * otherwise it's possible that the deterministic submit path suffers. |
487 | */ | 494 | */ |
488 | nvgpu_raw_spinlock_acquire(&g->pending_sema_waits_lock); | 495 | nvgpu_raw_spinlock_acquire(&g->pending_sema_waits_lock); |
489 | if (!list_empty(&g->pending_sema_waits)) | 496 | if (!nvgpu_list_empty(&g->pending_sema_waits)) |
490 | list_del_init(&w->entry); | 497 | nvgpu_list_del(&w->entry); |
491 | nvgpu_raw_spinlock_release(&g->pending_sema_waits_lock); | 498 | nvgpu_raw_spinlock_release(&g->pending_sema_waits_lock); |
492 | 499 | ||
493 | gk20a_dbg_info("waiting for pre fence %p '%s'", | 500 | gk20a_dbg_info("waiting for pre fence %p '%s'", |
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h index bfb8b204..9d931520 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/gk20a.h | |||
@@ -1009,7 +1009,7 @@ struct gk20a { | |||
1009 | struct nvgpu_semaphore_sea *sema_sea; | 1009 | struct nvgpu_semaphore_sea *sema_sea; |
1010 | 1010 | ||
1011 | /* List of pending SW semaphore waits. */ | 1011 | /* List of pending SW semaphore waits. */ |
1012 | struct list_head pending_sema_waits; | 1012 | struct nvgpu_list_node pending_sema_waits; |
1013 | struct nvgpu_raw_spinlock pending_sema_waits_lock; | 1013 | struct nvgpu_raw_spinlock pending_sema_waits_lock; |
1014 | 1014 | ||
1015 | /* held while manipulating # of debug/profiler sessions present */ | 1015 | /* held while manipulating # of debug/profiler sessions present */ |