author     Deepak Nibade <dnibade@nvidia.com>    2017-01-24 08:30:42 -0500
committer  mobile promotions <svcmobile_promotions@nvidia.com>    2017-02-22 07:15:02 -0500
commit     8ee3aa4b3175d8d27e57a0f5d5e2cdf3d78a4a58 (patch)
tree       505dfd2ea2aca2f1cbdb254baee980862d21e04d    /drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
parent     1f855af63fdd31fe3dcfee75f4f5f9b62f30d87e (diff)
gpu: nvgpu: use common nvgpu mutex/spinlock APIs
Instead of using the Linux mutex and spinlock APIs directly, use the new APIs defined in <nvgpu/lock.h>.

Replace the Linux-specific mutex/spinlock declaration, init, lock, and unlock APIs with the new APIs, e.g. struct mutex is replaced by struct nvgpu_mutex and mutex_lock() is replaced by nvgpu_mutex_acquire(). Also include <nvgpu/lock.h> instead of <linux/mutex.h> and <linux/spinlock.h>.

Add explicit <nvgpu/lock.h> includes to the files below to fix compilation failures:
gk20a/platform_gk20a.h
include/nvgpu/allocator.h

Jira NVGPU-13

Change-Id: I81a05d21ecdbd90c2076a9f0aefd0e40b215bd33
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1293187
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
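The conversion is purely mechanical. As a minimal illustration only (not part of this patch; the struct, field, and function names below are hypothetical), the old Linux locking calls map to the new nvgpu APIs like this:

#include <nvgpu/lock.h>                   /* instead of <linux/mutex.h> / <linux/spinlock.h> */

struct demo_state {                       /* hypothetical example struct, not from the patch */
        struct nvgpu_mutex demo_lock;     /* was: struct mutex demo_lock; */
        int counter;
};

static void demo_init(struct demo_state *s)
{
        nvgpu_mutex_init(&s->demo_lock);          /* was: mutex_init(&s->demo_lock); */
}

static void demo_bump(struct demo_state *s)
{
        nvgpu_mutex_acquire(&s->demo_lock);       /* was: mutex_lock(&s->demo_lock); */
        s->counter++;
        nvgpu_mutex_release(&s->demo_lock);       /* was: mutex_unlock(&s->demo_lock); */
}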
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c  134
1 file changed, 67 insertions(+), 67 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
index ac11e378..f6290e1d 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
@@ -44,9 +44,9 @@ nvgpu_dbg_gpu_get_session_channel(struct dbg_session_gk20a *dbg_s)
         struct channel_gk20a *ch;
         struct gk20a *g = dbg_s->g;
 
-        mutex_lock(&dbg_s->ch_list_lock);
+        nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
         if (list_empty(&dbg_s->ch_list)) {
-                mutex_unlock(&dbg_s->ch_list_lock);
+                nvgpu_mutex_release(&dbg_s->ch_list_lock);
                 return NULL;
         }
 
@@ -55,7 +55,7 @@ nvgpu_dbg_gpu_get_session_channel(struct dbg_session_gk20a *dbg_s)
                                  ch_entry);
         ch = g->fifo.channel + ch_data->chid;
 
-        mutex_unlock(&dbg_s->ch_list_lock);
+        nvgpu_mutex_release(&dbg_s->ch_list_lock);
 
         return ch;
 }
@@ -116,8 +116,8 @@ static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
 
         init_waitqueue_head(&dbg_session->dbg_events.wait_queue);
         INIT_LIST_HEAD(&dbg_session->ch_list);
-        mutex_init(&dbg_session->ch_list_lock);
-        mutex_init(&dbg_session->ioctl_lock);
+        nvgpu_mutex_init(&dbg_session->ch_list_lock);
+        nvgpu_mutex_init(&dbg_session->ioctl_lock);
         dbg_session->dbg_events.events_enabled = false;
         dbg_session->dbg_events.num_pending_events = 0;
 
@@ -127,61 +127,61 @@ static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
 /* used in scenarios where the debugger session can take just the inter-session
  * lock for performance, but the profiler session must take the per-gpu lock
  * since it might not have an associated channel. */
-static void gk20a_dbg_session_mutex_lock(struct dbg_session_gk20a *dbg_s)
+static void gk20a_dbg_session_nvgpu_mutex_acquire(struct dbg_session_gk20a *dbg_s)
 {
         struct channel_gk20a *ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
 
         if (dbg_s->is_profiler || !ch)
-                mutex_lock(&dbg_s->g->dbg_sessions_lock);
+                nvgpu_mutex_acquire(&dbg_s->g->dbg_sessions_lock);
         else
-                mutex_lock(&ch->dbg_s_lock);
+                nvgpu_mutex_acquire(&ch->dbg_s_lock);
 }
 
-static void gk20a_dbg_session_mutex_unlock(struct dbg_session_gk20a *dbg_s)
+static void gk20a_dbg_session_nvgpu_mutex_release(struct dbg_session_gk20a *dbg_s)
 {
         struct channel_gk20a *ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
 
         if (dbg_s->is_profiler || !ch)
-                mutex_unlock(&dbg_s->g->dbg_sessions_lock);
+                nvgpu_mutex_release(&dbg_s->g->dbg_sessions_lock);
         else
-                mutex_unlock(&ch->dbg_s_lock);
+                nvgpu_mutex_release(&ch->dbg_s_lock);
 }
 
 static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s)
 {
         gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
 
-        gk20a_dbg_session_mutex_lock(dbg_s);
+        gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
 
         dbg_s->dbg_events.events_enabled = true;
         dbg_s->dbg_events.num_pending_events = 0;
 
-        gk20a_dbg_session_mutex_unlock(dbg_s);
+        gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
 }
 
 static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s)
 {
         gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
 
-        gk20a_dbg_session_mutex_lock(dbg_s);
+        gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
 
         dbg_s->dbg_events.events_enabled = false;
         dbg_s->dbg_events.num_pending_events = 0;
 
-        gk20a_dbg_session_mutex_unlock(dbg_s);
+        gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
 }
 
 static void gk20a_dbg_gpu_events_clear(struct dbg_session_gk20a *dbg_s)
 {
         gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
 
-        gk20a_dbg_session_mutex_lock(dbg_s);
+        gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
 
         if (dbg_s->dbg_events.events_enabled &&
                         dbg_s->dbg_events.num_pending_events > 0)
                 dbg_s->dbg_events.num_pending_events--;
 
-        gk20a_dbg_session_mutex_unlock(dbg_s);
+        gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
 }
 
 static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
@@ -232,7 +232,7 @@ unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait)
 
         poll_wait(filep, &dbg_s->dbg_events.wait_queue, wait);
 
-        gk20a_dbg_session_mutex_lock(dbg_s);
+        gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
 
         if (dbg_s->dbg_events.events_enabled &&
                         dbg_s->dbg_events.num_pending_events > 0) {
@@ -243,7 +243,7 @@ unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait)
                 mask = (POLLPRI | POLLIN);
         }
 
-        gk20a_dbg_session_mutex_unlock(dbg_s);
+        gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
 
         return mask;
 }
@@ -268,7 +268,7 @@ void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch)
         gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
 
         /* guard against the session list being modified */
-        mutex_lock(&ch->dbg_s_lock);
+        nvgpu_mutex_acquire(&ch->dbg_s_lock);
 
         list_for_each_entry(session_data, &ch->dbg_s_list, dbg_s_entry) {
                 dbg_s = session_data->dbg_s;
@@ -284,7 +284,7 @@ void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch)
                 }
         }
 
-        mutex_unlock(&ch->dbg_s_lock);
+        nvgpu_mutex_release(&ch->dbg_s_lock);
 }
 
 bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch)
@@ -296,7 +296,7 @@ bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch)
         gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "");
 
         /* guard against the session list being modified */
-        mutex_lock(&ch->dbg_s_lock);
+        nvgpu_mutex_acquire(&ch->dbg_s_lock);
 
         list_for_each_entry(session_data, &ch->dbg_s_list, dbg_s_entry) {
                 dbg_s = session_data->dbg_s;
@@ -308,7 +308,7 @@ bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch)
                 }
         }
 
-        mutex_unlock(&ch->dbg_s_lock);
+        nvgpu_mutex_release(&ch->dbg_s_lock);
 
         return broadcast;
 }
@@ -321,7 +321,7 @@ int gk20a_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch)
         gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "");
 
         /* guard against the session list being modified */
-        mutex_lock(&ch->dbg_s_lock);
+        nvgpu_mutex_acquire(&ch->dbg_s_lock);
 
         list_for_each_entry(session_data, &ch->dbg_s_list, dbg_s_entry) {
                 dbg_s = session_data->dbg_s;
@@ -332,7 +332,7 @@ int gk20a_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch)
                 }
         }
 
-        mutex_unlock(&ch->dbg_s_lock);
+        nvgpu_mutex_release(&ch->dbg_s_lock);
 
         return 0;
 }
@@ -407,12 +407,12 @@ static int dbg_unbind_all_channels_gk20a(struct dbg_session_gk20a *dbg_s)
         struct dbg_session_channel_data *ch_data, *tmp;
         struct gk20a *g = dbg_s->g;
 
-        mutex_lock(&g->dbg_sessions_lock);
-        mutex_lock(&dbg_s->ch_list_lock);
+        nvgpu_mutex_acquire(&g->dbg_sessions_lock);
+        nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
         list_for_each_entry_safe(ch_data, tmp, &dbg_s->ch_list, ch_entry)
                 dbg_unbind_single_channel_gk20a(dbg_s, ch_data);
-        mutex_unlock(&dbg_s->ch_list_lock);
-        mutex_unlock(&g->dbg_sessions_lock);
+        nvgpu_mutex_release(&dbg_s->ch_list_lock);
+        nvgpu_mutex_release(&g->dbg_sessions_lock);
 
         return 0;
 }
@@ -435,25 +435,25 @@ static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
                 return -EINVAL;
         }
 
-        mutex_lock(&dbg_s->ch_list_lock);
+        nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
         list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry) {
                 if (ch->hw_chid == ch_data->chid) {
                         channel_found = true;
                         break;
                 }
         }
-        mutex_unlock(&dbg_s->ch_list_lock);
+        nvgpu_mutex_release(&dbg_s->ch_list_lock);
 
         if (!channel_found) {
                 gk20a_dbg_fn("channel not bounded, fd=%d\n", args->channel_fd);
                 return -EINVAL;
         }
 
-        mutex_lock(&g->dbg_sessions_lock);
-        mutex_lock(&dbg_s->ch_list_lock);
+        nvgpu_mutex_acquire(&g->dbg_sessions_lock);
+        nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
         err = dbg_unbind_single_channel_gk20a(dbg_s, ch_data);
-        mutex_unlock(&dbg_s->ch_list_lock);
-        mutex_unlock(&g->dbg_sessions_lock);
+        nvgpu_mutex_release(&dbg_s->ch_list_lock);
+        nvgpu_mutex_release(&g->dbg_sessions_lock);
 
         return err;
 }
@@ -472,11 +472,11 @@ int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp)
          * which called powergate/timeout disable ioctl, to be killed without
          * calling powergate/timeout enable ioctl
          */
-        mutex_lock(&g->dbg_sessions_lock);
+        nvgpu_mutex_acquire(&g->dbg_sessions_lock);
         g->ops.dbg_session_ops.dbg_set_powergate(dbg_s,
                         NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE);
         nvgpu_dbg_timeout_enable(dbg_s, NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE);
-        mutex_unlock(&g->dbg_sessions_lock);
+        nvgpu_mutex_release(&g->dbg_sessions_lock);
 
         kfree(dbg_s);
         return 0;
@@ -510,8 +510,8 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
 
         gk20a_dbg_fn("%s hwchid=%d", dev_name(dbg_s->dev), ch->hw_chid);
 
-        mutex_lock(&g->dbg_sessions_lock);
-        mutex_lock(&ch->dbg_s_lock);
+        nvgpu_mutex_acquire(&g->dbg_sessions_lock);
+        nvgpu_mutex_acquire(&ch->dbg_s_lock);
 
         ch_data = kzalloc(sizeof(*ch_data), GFP_KERNEL);
         if (!ch_data) {
@@ -535,12 +535,12 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
 
         list_add(&session_data->dbg_s_entry, &ch->dbg_s_list);
 
-        mutex_lock(&dbg_s->ch_list_lock);
+        nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
         list_add_tail(&ch_data->ch_entry, &dbg_s->ch_list);
-        mutex_unlock(&dbg_s->ch_list_lock);
+        nvgpu_mutex_release(&dbg_s->ch_list_lock);
 
-        mutex_unlock(&ch->dbg_s_lock);
-        mutex_unlock(&g->dbg_sessions_lock);
+        nvgpu_mutex_release(&ch->dbg_s_lock);
+        nvgpu_mutex_release(&g->dbg_sessions_lock);
 
         return 0;
 }
@@ -591,9 +591,9 @@ static int nvgpu_dbg_gpu_ioctl_timeout(struct dbg_session_gk20a *dbg_s,
 
         gk20a_dbg_fn("powergate mode = %d", args->enable);
 
-        mutex_lock(&g->dbg_sessions_lock);
+        nvgpu_mutex_acquire(&g->dbg_sessions_lock);
         err = nvgpu_dbg_timeout_enable(dbg_s, args->enable);
-        mutex_unlock(&g->dbg_sessions_lock);
+        nvgpu_mutex_release(&g->dbg_sessions_lock);
 
         return err;
 }
@@ -604,9 +604,9 @@ static void nvgpu_dbg_gpu_ioctl_get_timeout(struct dbg_session_gk20a *dbg_s,
         int status;
         struct gk20a *g = get_gk20a(dbg_s->dev);
 
-        mutex_lock(&g->dbg_sessions_lock);
+        nvgpu_mutex_acquire(&g->dbg_sessions_lock);
         status = g->timeouts_enabled;
-        mutex_unlock(&g->dbg_sessions_lock);
+        nvgpu_mutex_release(&g->dbg_sessions_lock);
 
         if (status)
                 args->enable = NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE;
@@ -620,11 +620,11 @@ static int nvgpu_dbg_gpu_ioctl_set_next_stop_trigger_type(
 {
         gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
 
-        gk20a_dbg_session_mutex_lock(dbg_s);
+        gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
 
         dbg_s->broadcast_stop_trigger = (args->broadcast != 0);
 
-        gk20a_dbg_session_mutex_unlock(dbg_s);
+        gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
 
         return 0;
 }
@@ -651,12 +651,12 @@ static int nvgpu_dbg_gpu_ioctl_read_single_sm_error_state(
         if (write_size > args->sm_error_state_record_size)
                 write_size = args->sm_error_state_record_size;
 
-        mutex_lock(&g->dbg_sessions_lock);
+        nvgpu_mutex_acquire(&g->dbg_sessions_lock);
         err = copy_to_user((void __user *)(uintptr_t)
                            args->sm_error_state_record_mem,
                            sm_error_state,
                            write_size);
-        mutex_unlock(&g->dbg_sessions_lock);
+        nvgpu_mutex_release(&g->dbg_sessions_lock);
         if (err) {
                 gk20a_err(dev_from_gk20a(g), "copy_to_user failed!\n");
                 return err;
@@ -728,12 +728,12 @@ static int nvgpu_dbg_gpu_ioctl_write_single_sm_error_state(
         if (read_size > args->sm_error_state_record_size)
                 read_size = args->sm_error_state_record_size;
 
-        mutex_lock(&g->dbg_sessions_lock);
+        nvgpu_mutex_acquire(&g->dbg_sessions_lock);
         err = copy_from_user(sm_error_state,
                              (void __user *)(uintptr_t)
                              args->sm_error_state_record_mem,
                              read_size);
-        mutex_unlock(&g->dbg_sessions_lock);
+        nvgpu_mutex_release(&g->dbg_sessions_lock);
         if (err) {
                 err = -ENOMEM;
                 goto err_free;
@@ -901,7 +901,7 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
         }
 
         /* protect from threaded user space calls */
-        mutex_lock(&dbg_s->ioctl_lock);
+        nvgpu_mutex_acquire(&dbg_s->ioctl_lock);
 
         switch (cmd) {
         case NVGPU_DBG_GPU_IOCTL_BIND_CHANNEL:
@@ -1007,7 +1007,7 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
                 break;
         }
 
-        mutex_unlock(&dbg_s->ioctl_lock);
+        nvgpu_mutex_release(&dbg_s->ioctl_lock);
 
         gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err);
 
@@ -1032,9 +1032,9 @@ static bool gr_context_info_available(struct dbg_session_gk20a *dbg_s,
 {
         int err;
 
-        mutex_lock(&gr->ctx_mutex);
+        nvgpu_mutex_acquire(&gr->ctx_mutex);
         err = !gr->ctx_vars.golden_image_initialized;
-        mutex_unlock(&gr->ctx_mutex);
+        nvgpu_mutex_release(&gr->ctx_mutex);
         if (err)
                 return false;
         return true;
@@ -1089,7 +1089,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
         /* since exec_reg_ops sends methods to the ucode, it must take the
          * global gpu lock to protect against mixing methods from debug sessions
          * on other channels */
-        mutex_lock(&g->dbg_sessions_lock);
+        nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 
         if (!dbg_s->is_pg_disabled && !gk20a_gpu_is_virtual(dbg_s->dev)) {
                 /* In the virtual case, the server will handle
@@ -1150,7 +1150,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
                 }
         }
 
-        mutex_unlock(&g->dbg_sessions_lock);
+        nvgpu_mutex_release(&g->dbg_sessions_lock);
 
         if (!err && powergate_err)
                 err = powergate_err;
@@ -1276,9 +1276,9 @@ static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
         gk20a_dbg_fn("%s powergate mode = %d",
                      dev_name(dbg_s->dev), args->mode);
 
-        mutex_lock(&g->dbg_sessions_lock);
+        nvgpu_mutex_acquire(&g->dbg_sessions_lock);
         err = g->ops.dbg_session_ops.dbg_set_powergate(dbg_s, args->mode);
-        mutex_unlock(&g->dbg_sessions_lock);
+        nvgpu_mutex_release(&g->dbg_sessions_lock);
         return err;
 }
 
@@ -1299,7 +1299,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
         }
 
         /* Take the global lock, since we'll be doing global regops */
-        mutex_lock(&g->dbg_sessions_lock);
+        nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 
         ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s);
         if (!ch_gk20a) {
@@ -1319,7 +1319,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
 
         err = g->ops.regops.apply_smpc_war(dbg_s);
 clean_up:
-        mutex_unlock(&g->dbg_sessions_lock);
+        nvgpu_mutex_release(&g->dbg_sessions_lock);
         gk20a_idle(g->dev);
         return err;
 }
@@ -1341,7 +1341,7 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
         }
 
         /* Take the global lock, since we'll be doing global regops */
-        mutex_lock(&g->dbg_sessions_lock);
+        nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 
         ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s);
         if (!ch_gk20a) {
@@ -1361,7 +1361,7 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
          * added here with gk20a being deprecated
          */
 clean_up:
-        mutex_unlock(&g->dbg_sessions_lock);
+        nvgpu_mutex_release(&g->dbg_sessions_lock);
         gk20a_idle(g->dev);
         return err;
 }
@@ -1386,7 +1386,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
                 return err;
         }
 
-        mutex_lock(&g->dbg_sessions_lock);
+        nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 
         /* Suspend GPU context switching */
         err = gr_gk20a_disable_ctxsw(g);
@@ -1411,7 +1411,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
                 gk20a_err(dev_from_gk20a(g), "unable to restart ctxsw!\n");
 
 clean_up:
-        mutex_unlock(&g->dbg_sessions_lock);
+        nvgpu_mutex_release(&g->dbg_sessions_lock);
         gk20a_idle(g->dev);
 
         return err;