diff options
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gk20a.c')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/gk20a.c | 66 |
1 file changed, 65 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c index 5ed854d3..086f756b 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gk20a.c | |||
@@ -1732,16 +1732,67 @@ void gk20a_busy_noresume(struct device *dev) | |||
1732 | pm_runtime_get_noresume(dev); | 1732 | pm_runtime_get_noresume(dev); |
1733 | } | 1733 | } |
1734 | 1734 | ||
1735 | /* | ||
1736 | * Start the process for unloading the driver. Set g->driver_is_dying. | ||
1737 | */ | ||
1738 | void gk20a_driver_start_unload(struct gk20a *g) | ||
1739 | { | ||
1740 | g->driver_is_dying = 1; | ||
1741 | } | ||
1742 | |||
1743 | int gk20a_wait_for_idle(struct device *dev) | ||
1744 | { | ||
1745 | struct gk20a *g = get_gk20a(dev); | ||
1746 | int wait_length = 150; /* 3 second overall max wait. */ | ||
1747 | |||
1748 | if (!g) | ||
1749 | return -ENODEV; | ||
1750 | |||
1751 | while (atomic_read(&g->usage_count) && wait_length-- >= 0) | ||
1752 | msleep(20); | ||
1753 | |||
1754 | if (wait_length < 0) { | ||
1755 | pr_warn("%s: Timed out waiting for idle (%d)!\n", | ||
1756 | __func__, atomic_read(&g->usage_count)); | ||
1757 | return -ETIMEDOUT; | ||
1758 | } | ||
1759 | |||
1760 | return 0; | ||
1761 | } | ||
1762 | |||
1763 | /* | ||
1764 | * Check if the device can go busy. Basically if the driver is currently | ||
1765 | * in the process of dying then do not let new places make the driver busy. | ||
1766 | */ | ||
1767 | static int gk20a_can_busy(struct gk20a *g) | ||
1768 | { | ||
1769 | if (g->driver_is_dying) | ||
1770 | return 0; | ||
1771 | return 1; | ||
1772 | } | ||
1773 | |||
1735 | int gk20a_busy(struct device *dev) | 1774 | int gk20a_busy(struct device *dev) |
1736 | { | 1775 | { |
1737 | int ret = 0; | 1776 | int ret = 0; |
1738 | struct gk20a *g = get_gk20a(dev); | 1777 | struct gk20a *g = get_gk20a(dev); |
1778 | struct gk20a_platform *platform; | ||
1779 | |||
1780 | if (!dev) | ||
1781 | return -ENODEV; | ||
1782 | |||
1783 | platform = gk20a_get_platform(dev); | ||
1784 | |||
1785 | if (!g || !gk20a_can_busy(g)) | ||
1786 | return -ENODEV; | ||
1787 | |||
1788 | atomic_inc(&g->usage_count); | ||
1739 | 1789 | ||
1740 | down_read(&g->busy_lock); | 1790 | down_read(&g->busy_lock); |
1741 | if (pm_runtime_enabled(dev)) { | 1791 | if (pm_runtime_enabled(dev)) { |
1742 | ret = pm_runtime_get_sync(dev); | 1792 | ret = pm_runtime_get_sync(dev); |
1743 | if (ret < 0) { | 1793 | if (ret < 0) { |
1744 | pm_runtime_put_noidle(dev); | 1794 | pm_runtime_put_noidle(dev); |
1795 | atomic_dec(&g->usage_count); | ||
1745 | goto fail; | 1796 | goto fail; |
1746 | } | 1797 | } |
1747 | } else { | 1798 | } else { |
@@ -1749,8 +1800,10 @@ int gk20a_busy(struct device *dev) | |||
1749 | ret = gk20a_gpu_is_virtual(dev) ? | 1800 | ret = gk20a_gpu_is_virtual(dev) ? |
1750 | vgpu_pm_finalize_poweron(dev) | 1801 | vgpu_pm_finalize_poweron(dev) |
1751 | : gk20a_pm_finalize_poweron(dev); | 1802 | : gk20a_pm_finalize_poweron(dev); |
1752 | if (ret) | 1803 | if (ret) { |
1804 | atomic_dec(&g->usage_count); | ||
1753 | goto fail; | 1805 | goto fail; |
1806 | } | ||
1754 | } | 1807 | } |
1755 | } | 1808 | } |
1756 | 1809 | ||
@@ -1769,6 +1822,17 @@ void gk20a_idle_nosuspend(struct device *dev) | |||
1769 | 1822 | ||
1770 | void gk20a_idle(struct device *dev) | 1823 | void gk20a_idle(struct device *dev) |
1771 | { | 1824 | { |
1825 | struct gk20a_platform *platform; | ||
1826 | struct gk20a *g; | ||
1827 | |||
1828 | if (!dev) | ||
1829 | return; | ||
1830 | |||
1831 | g = get_gk20a(dev); | ||
1832 | platform = gk20a_get_platform(dev); | ||
1833 | |||
1834 | atomic_dec(&g->usage_count); | ||
1835 | |||
1772 | if (pm_runtime_enabled(dev)) { | 1836 | if (pm_runtime_enabled(dev)) { |
1773 | #ifdef CONFIG_PM | 1837 | #ifdef CONFIG_PM |
1774 | if (atomic_read(&dev->power.usage_count) == 1) | 1838 | if (atomic_read(&dev->power.usage_count) == 1) |