author		David Nieto <dmartineznie@nvidia.com>	2017-02-13 14:22:59 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-03-20 19:39:50 -0400
commit		469308becaff326da02fcf791e803e812e1cf9f8 (patch)
tree		2acc6d432a7a6023c3d6bb034df8ac0e9cb6bfbf /drivers/gpu/nvgpu/clk/clk_arb.c
parent		50f371f891c889c782187036c31132fa94c573ac (diff)
gpu: nvgpu: fix arbiter teardown on PCI
The driver is not properly tearing down the arbiter on the PCI
driver unload. This change makes sure that the workqueues are
drained before tearing down the driver.

bug 200277762
JIRA: EVLR-1023

Change-Id: If98fd00e27949ba1569dd26e2af02b75897231a7
Signed-off-by: David Nieto <dmartineznie@nvidia.com>
Reviewed-on: http://git-master/r/1320147
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
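For context, a minimal sketch of the drain-then-destroy pattern the fix applies, using a hypothetical demo_ctx with fields wq and work (the real arbiter keeps its queues and work items in struct nvgpu_clk_arb as update_work_queue/update_fn_work and vf_table_work_queue/vf_table_fn_work):

#include <linux/workqueue.h>

/* Hypothetical driver context standing in for struct nvgpu_clk_arb. */
struct demo_ctx {
	struct workqueue_struct *wq;
	struct work_struct work;
};

static void demo_teardown(struct demo_ctx *ctx)
{
	if (!ctx)
		return;

	/* Wait until any queued or in-flight work item has finished. */
	cancel_work_sync(&ctx->work);

	/* Flush and free the workqueue itself. */
	destroy_workqueue(ctx->wq);

	/* Clear the pointer so later callers can see teardown happened
	 * and skip queue_work() instead of touching a freed queue. */
	ctx->wq = NULL;
}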
Diffstat (limited to 'drivers/gpu/nvgpu/clk/clk_arb.c')
-rw-r--r--	drivers/gpu/nvgpu/clk/clk_arb.c	67
1 file changed, 46 insertions, 21 deletions
diff --git a/drivers/gpu/nvgpu/clk/clk_arb.c b/drivers/gpu/nvgpu/clk/clk_arb.c
index 44b442d8..30447d3e 100644
--- a/drivers/gpu/nvgpu/clk/clk_arb.c
+++ b/drivers/gpu/nvgpu/clk/clk_arb.c
@@ -403,7 +403,8 @@ void nvgpu_clk_arb_schedule_alarm(struct gk20a *g, u32 alarm)
 	struct nvgpu_clk_arb *arb = g->clk_arb;
 
 	nvgpu_clk_arb_set_global_alarm(g, alarm);
-	queue_work(arb->update_work_queue, &arb->update_fn_work);
+	if (arb->update_work_queue)
+		queue_work(arb->update_work_queue, &arb->update_fn_work);
 }
 
 static void nvgpu_clk_arb_clear_global_alarm(struct gk20a *g, u32 alarm)
@@ -455,8 +456,30 @@ static void nvgpu_clk_arb_set_global_alarm(struct gk20a *g, u32 alarm)
 
 void nvgpu_clk_arb_cleanup_arbiter(struct gk20a *g)
 {
+	struct nvgpu_clk_arb *arb = g->clk_arb;
+	int index;
+
+	if (arb) {
+		cancel_work_sync(&arb->vf_table_fn_work);
+		destroy_workqueue(arb->vf_table_work_queue);
+		arb->vf_table_work_queue = NULL;
+
+		cancel_work_sync(&arb->update_fn_work);
+		destroy_workqueue(arb->update_work_queue);
+		arb->update_work_queue = NULL;
+
+		kfree(arb->gpc2clk_f_points);
+		kfree(arb->mclk_f_points);
+
+		for (index = 0; index < 2; index++) {
+			kfree(arb->vf_table_pool[index].gpc2clk_points);
+			kfree(arb->vf_table_pool[index].mclk_points);
+		}
+	}
+
 	nvgpu_mutex_destroy(&g->clk_arb->pstate_lock);
 	kfree(g->clk_arb);
+	g->clk_arb = NULL;
 }
 
 static int nvgpu_clk_arb_install_fd(struct gk20a *g,
@@ -575,9 +598,11 @@ static void nvgpu_clk_arb_free_session(struct kref *refcount)
 
 	gk20a_dbg_fn("");
 
-	nvgpu_spinlock_acquire(&arb->sessions_lock);
-	list_del_rcu(&session->link);
-	nvgpu_spinlock_release(&arb->sessions_lock);
+	if (arb) {
+		nvgpu_spinlock_acquire(&arb->sessions_lock);
+		list_del_rcu(&session->link);
+		nvgpu_spinlock_release(&arb->sessions_lock);
+	}
 
 	head = llist_del_all(&session->targets);
 	llist_for_each_entry_safe(dev, tmp, head, node) {
@@ -596,8 +621,8 @@ void nvgpu_clk_arb_release_session(struct gk20a *g,
 
 	session->zombie = true;
 	kref_put(&session->refcount, nvgpu_clk_arb_free_session);
-
-	queue_work(arb->update_work_queue, &arb->update_fn_work);
+	if (arb && arb->update_work_queue)
+		queue_work(arb->update_work_queue, &arb->update_fn_work);
 }
 
 int nvgpu_clk_arb_install_event_fd(struct gk20a *g,
@@ -964,8 +989,8 @@ exit_vf_table:
 	if (status < 0)
 		nvgpu_clk_arb_set_global_alarm(g,
 			EVENT(ALARM_VF_TABLE_UPDATE_FAILED));
-
-	queue_work(arb->update_work_queue, &arb->update_fn_work);
+	if (arb->update_work_queue)
+		queue_work(arb->update_work_queue, &arb->update_fn_work);
 
 	return status;
 }
@@ -973,8 +998,8 @@ exit_vf_table:
 void nvgpu_clk_arb_schedule_vf_table_update(struct gk20a *g)
 {
 	struct nvgpu_clk_arb *arb = g->clk_arb;
-
-	queue_work(arb->vf_table_work_queue, &arb->vf_table_fn_work);
+	if (arb->vf_table_work_queue)
+		queue_work(arb->vf_table_work_queue, &arb->vf_table_fn_work);
 }
 
 static void nvgpu_clk_arb_run_vf_table_cb(struct work_struct *work)
@@ -991,8 +1016,9 @@ static void nvgpu_clk_arb_run_vf_table_cb(struct work_struct *work)
991 "failed to cache VF table"); 1016 "failed to cache VF table");
992 nvgpu_clk_arb_set_global_alarm(g, 1017 nvgpu_clk_arb_set_global_alarm(g,
993 EVENT(ALARM_VF_TABLE_UPDATE_FAILED)); 1018 EVENT(ALARM_VF_TABLE_UPDATE_FAILED));
994 1019 if (arb->update_work_queue)
995 queue_work(arb->update_work_queue, &arb->update_fn_work); 1020 queue_work(arb->update_work_queue,
1021 &arb->update_fn_work);
996 1022
997 return; 1023 return;
998 } 1024 }
@@ -1490,8 +1516,8 @@ int nvgpu_clk_arb_commit_request_fd(struct gk20a *g,
 	}
 	kref_get(&dev->refcount);
 	llist_add(&dev->node, &session->targets);
-
-	queue_work(arb->update_work_queue, &arb->update_fn_work);
+	if (arb->update_work_queue)
+		queue_work(arb->update_work_queue, &arb->update_fn_work);
 
 fdput_fd:
 	fdput(fd);
@@ -1568,15 +1594,12 @@ static int nvgpu_clk_arb_release_completion_dev(struct inode *inode,
 {
 	struct nvgpu_clk_dev *dev = filp->private_data;
 	struct nvgpu_clk_session *session = dev->session;
-	struct nvgpu_clk_arb *arb;
 
-	arb = session->g->clk_arb;
 
 	gk20a_dbg_fn("");
 
 	kref_put(&session->refcount, nvgpu_clk_arb_free_session);
 	kref_put(&dev->refcount, nvgpu_clk_arb_free_fd);
-
 	return 0;
 }
 
@@ -1591,15 +1614,17 @@ static int nvgpu_clk_arb_release_event_dev(struct inode *inode,
 
 	gk20a_dbg_fn("");
 
-	nvgpu_spinlock_acquire(&arb->users_lock);
-	list_del_rcu(&dev->link);
-	nvgpu_spinlock_release(&arb->users_lock);
+	if (arb) {
+		nvgpu_spinlock_acquire(&arb->users_lock);
+		list_del_rcu(&dev->link);
+		nvgpu_spinlock_release(&arb->users_lock);
+	}
 
 	synchronize_rcu();
 	kref_put(&session->refcount, nvgpu_clk_arb_free_session);
 
 	nvgpu_clk_notification_queue_free(&dev->queue);
-	kfree(dev);
+	kref_put(&dev->refcount, nvgpu_clk_arb_free_fd);
 
 	return 0;
 }
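The other recurring change in the diff is the guard on every enqueue path: the workqueue pointer is tested before queue_work(), so requests that race with nvgpu_clk_arb_cleanup_arbiter() are simply dropped once the queue has been torn down and set to NULL. A sketch of that guard, reusing the hypothetical demo_ctx above:

static void demo_schedule(struct demo_ctx *ctx)
{
	/* After teardown ctx->wq is NULL; dropping the request is the
	 * safe choice because the arbiter is going away anyway. */
	if (ctx && ctx->wq)
		queue_work(ctx->wq, &ctx->work);
}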