diff options
author | Deepak Nibade <dnibade@nvidia.com> | 2017-04-04 04:32:42 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-04-10 02:54:26 -0400 |
commit | e4464fd552d0bee2ca149c6a51fbf88b0fafb531 (patch) | |
tree | 8124ac83f97483839522aaf644e1bde00b128177 /drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c | |
parent | db2ee5c127afc3c270b0a5c0e74660f0532dafbe (diff) |
gpu: nvgpu: use nvgpu list for channel and debug session lists
Use nvgpu list APIs instead of Linux list APIs
to store the channel list in a debug session and
the debug session list in a channel
Jira NVGPU-13
Change-Id: Iaf89524955a155adcb8a24505df6613bd9c4ccfb
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1454690
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c | 35 |
1 file changed, 20 insertions, 15 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c index 1ebb1900..1eaca686 100644 --- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c | |||
@@ -47,13 +47,13 @@ nvgpu_dbg_gpu_get_session_channel(struct dbg_session_gk20a *dbg_s) | |||
47 | struct gk20a *g = dbg_s->g; | 47 | struct gk20a *g = dbg_s->g; |
48 | 48 | ||
49 | nvgpu_mutex_acquire(&dbg_s->ch_list_lock); | 49 | nvgpu_mutex_acquire(&dbg_s->ch_list_lock); |
50 | if (list_empty(&dbg_s->ch_list)) { | 50 | if (nvgpu_list_empty(&dbg_s->ch_list)) { |
51 | nvgpu_mutex_release(&dbg_s->ch_list_lock); | 51 | nvgpu_mutex_release(&dbg_s->ch_list_lock); |
52 | return NULL; | 52 | return NULL; |
53 | } | 53 | } |
54 | 54 | ||
55 | ch_data = list_first_entry(&dbg_s->ch_list, | 55 | ch_data = nvgpu_list_first_entry(&dbg_s->ch_list, |
56 | struct dbg_session_channel_data, | 56 | dbg_session_channel_data, |
57 | ch_entry); | 57 | ch_entry); |
58 | ch = g->fifo.channel + ch_data->chid; | 58 | ch = g->fifo.channel + ch_data->chid; |
59 | 59 | ||
@@ -138,7 +138,7 @@ static int gk20a_dbg_gpu_do_dev_open(struct inode *inode, | |||
138 | dbg_session->is_timeout_disabled = false; | 138 | dbg_session->is_timeout_disabled = false; |
139 | 139 | ||
140 | init_waitqueue_head(&dbg_session->dbg_events.wait_queue); | 140 | init_waitqueue_head(&dbg_session->dbg_events.wait_queue); |
141 | INIT_LIST_HEAD(&dbg_session->ch_list); | 141 | nvgpu_init_list_node(&dbg_session->ch_list); |
142 | err = nvgpu_mutex_init(&dbg_session->ch_list_lock); | 142 | err = nvgpu_mutex_init(&dbg_session->ch_list_lock); |
143 | if (err) | 143 | if (err) |
144 | goto err_free_session; | 144 | goto err_free_session; |
@@ -305,7 +305,8 @@ void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch) | |||
305 | /* guard against the session list being modified */ | 305 | /* guard against the session list being modified */ |
306 | nvgpu_mutex_acquire(&ch->dbg_s_lock); | 306 | nvgpu_mutex_acquire(&ch->dbg_s_lock); |
307 | 307 | ||
308 | list_for_each_entry(session_data, &ch->dbg_s_list, dbg_s_entry) { | 308 | nvgpu_list_for_each_entry(session_data, &ch->dbg_s_list, |
309 | dbg_session_data, dbg_s_entry) { | ||
309 | dbg_s = session_data->dbg_s; | 310 | dbg_s = session_data->dbg_s; |
310 | if (dbg_s->dbg_events.events_enabled) { | 311 | if (dbg_s->dbg_events.events_enabled) { |
311 | gk20a_dbg(gpu_dbg_gpu_dbg, "posting event on session id %d", | 312 | gk20a_dbg(gpu_dbg_gpu_dbg, "posting event on session id %d", |
@@ -333,7 +334,8 @@ bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch) | |||
333 | /* guard against the session list being modified */ | 334 | /* guard against the session list being modified */ |
334 | nvgpu_mutex_acquire(&ch->dbg_s_lock); | 335 | nvgpu_mutex_acquire(&ch->dbg_s_lock); |
335 | 336 | ||
336 | list_for_each_entry(session_data, &ch->dbg_s_list, dbg_s_entry) { | 337 | nvgpu_list_for_each_entry(session_data, &ch->dbg_s_list, |
338 | dbg_session_data, dbg_s_entry) { | ||
337 | dbg_s = session_data->dbg_s; | 339 | dbg_s = session_data->dbg_s; |
338 | if (dbg_s->broadcast_stop_trigger) { | 340 | if (dbg_s->broadcast_stop_trigger) { |
339 | gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr, | 341 | gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr, |
@@ -358,7 +360,8 @@ int gk20a_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch) | |||
358 | /* guard against the session list being modified */ | 360 | /* guard against the session list being modified */ |
359 | nvgpu_mutex_acquire(&ch->dbg_s_lock); | 361 | nvgpu_mutex_acquire(&ch->dbg_s_lock); |
360 | 362 | ||
361 | list_for_each_entry(session_data, &ch->dbg_s_list, dbg_s_entry) { | 363 | nvgpu_list_for_each_entry(session_data, &ch->dbg_s_list, |
364 | dbg_session_data, dbg_s_entry) { | ||
362 | dbg_s = session_data->dbg_s; | 365 | dbg_s = session_data->dbg_s; |
363 | if (dbg_s->broadcast_stop_trigger) { | 366 | if (dbg_s->broadcast_stop_trigger) { |
364 | gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr, | 367 | gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr, |
@@ -440,10 +443,10 @@ int dbg_unbind_single_channel_gk20a(struct dbg_session_gk20a *dbg_s, | |||
440 | } | 443 | } |
441 | } | 444 | } |
442 | 445 | ||
443 | list_del_init(&ch_data->ch_entry); | 446 | nvgpu_list_del(&ch_data->ch_entry); |
444 | 447 | ||
445 | session_data = ch_data->session_data; | 448 | session_data = ch_data->session_data; |
446 | list_del_init(&session_data->dbg_s_entry); | 449 | nvgpu_list_del(&session_data->dbg_s_entry); |
447 | nvgpu_kfree(dbg_s->g, session_data); | 450 | nvgpu_kfree(dbg_s->g, session_data); |
448 | 451 | ||
449 | fput(ch_data->ch_f); | 452 | fput(ch_data->ch_f); |
@@ -459,7 +462,8 @@ static int dbg_unbind_all_channels_gk20a(struct dbg_session_gk20a *dbg_s) | |||
459 | 462 | ||
460 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); | 463 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
461 | nvgpu_mutex_acquire(&dbg_s->ch_list_lock); | 464 | nvgpu_mutex_acquire(&dbg_s->ch_list_lock); |
462 | list_for_each_entry_safe(ch_data, tmp, &dbg_s->ch_list, ch_entry) | 465 | nvgpu_list_for_each_entry_safe(ch_data, tmp, &dbg_s->ch_list, |
466 | dbg_session_channel_data, ch_entry) | ||
463 | dbg_unbind_single_channel_gk20a(dbg_s, ch_data); | 467 | dbg_unbind_single_channel_gk20a(dbg_s, ch_data); |
464 | nvgpu_mutex_release(&dbg_s->ch_list_lock); | 468 | nvgpu_mutex_release(&dbg_s->ch_list_lock); |
465 | nvgpu_mutex_release(&g->dbg_sessions_lock); | 469 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
@@ -486,7 +490,8 @@ static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s, | |||
486 | } | 490 | } |
487 | 491 | ||
488 | nvgpu_mutex_acquire(&dbg_s->ch_list_lock); | 492 | nvgpu_mutex_acquire(&dbg_s->ch_list_lock); |
489 | list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry) { | 493 | nvgpu_list_for_each_entry(ch_data, &dbg_s->ch_list, |
494 | dbg_session_channel_data, ch_entry) { | ||
490 | if (ch->hw_chid == ch_data->chid) { | 495 | if (ch->hw_chid == ch_data->chid) { |
491 | channel_found = true; | 496 | channel_found = true; |
492 | break; | 497 | break; |
@@ -591,7 +596,7 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s, | |||
591 | ch_data->ch_f = f; | 596 | ch_data->ch_f = f; |
592 | ch_data->channel_fd = args->channel_fd; | 597 | ch_data->channel_fd = args->channel_fd; |
593 | ch_data->chid = ch->hw_chid; | 598 | ch_data->chid = ch->hw_chid; |
594 | INIT_LIST_HEAD(&ch_data->ch_entry); | 599 | nvgpu_init_list_node(&ch_data->ch_entry); |
595 | 600 | ||
596 | session_data = nvgpu_kzalloc(g, sizeof(*session_data)); | 601 | session_data = nvgpu_kzalloc(g, sizeof(*session_data)); |
597 | if (!session_data) { | 602 | if (!session_data) { |
@@ -600,13 +605,13 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s, | |||
600 | return -ENOMEM; | 605 | return -ENOMEM; |
601 | } | 606 | } |
602 | session_data->dbg_s = dbg_s; | 607 | session_data->dbg_s = dbg_s; |
603 | INIT_LIST_HEAD(&session_data->dbg_s_entry); | 608 | nvgpu_init_list_node(&session_data->dbg_s_entry); |
604 | ch_data->session_data = session_data; | 609 | ch_data->session_data = session_data; |
605 | 610 | ||
606 | list_add(&session_data->dbg_s_entry, &ch->dbg_s_list); | 611 | nvgpu_list_add(&session_data->dbg_s_entry, &ch->dbg_s_list); |
607 | 612 | ||
608 | nvgpu_mutex_acquire(&dbg_s->ch_list_lock); | 613 | nvgpu_mutex_acquire(&dbg_s->ch_list_lock); |
609 | list_add_tail(&ch_data->ch_entry, &dbg_s->ch_list); | 614 | nvgpu_list_add_tail(&ch_data->ch_entry, &dbg_s->ch_list); |
610 | nvgpu_mutex_release(&dbg_s->ch_list_lock); | 615 | nvgpu_mutex_release(&dbg_s->ch_list_lock); |
611 | 616 | ||
612 | nvgpu_mutex_release(&ch->dbg_s_lock); | 617 | nvgpu_mutex_release(&ch->dbg_s_lock); |