summary refs log tree commit diff stats
path: root/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
diff options
context:
space:
mode:
author: Terje Bergstrom <tbergstrom@nvidia.com> 2014-05-14 08:22:49 -0400
committer: Dan Willemsen <dwillemsen@nvidia.com> 2015-03-18 15:10:05 -0400
commit: 6b33379c55a8368ce9e5ed1381f9aeeebe383dfe (patch)
tree: 625ae2366c0b21d9c4b18255f691f506debfcbfb /drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
parent: cd13ee4aafcdb0206078e9e41aca53107235bbed (diff)
gpu: nvgpu: Rewrite PMU boot-up sequence
Rewrite the PMU boot sequence as a state machine. At PMU power-up, send the initial messages and reset the state machine. At each reply from the PMU, perform the next stage of PMU boot and advance the state. Because PMU and FECS boot are now independent, we must ensure the engine is idle before saving ZBC.

Change-Id: I1ea747ab794ef08f1784eeabfdae7655d585ff21
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/410205
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/pmu_gk20a.c')
-rw-r--r-- drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | 170
1 file changed, 67 insertions(+), 103 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index bccd4100..b784b9a6 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -28,6 +28,7 @@
28#include <linux/uaccess.h> 28#include <linux/uaccess.h>
29 29
30#include "gk20a.h" 30#include "gk20a.h"
31#include "gr_gk20a.h"
31#include "hw_mc_gk20a.h" 32#include "hw_mc_gk20a.h"
32#include "hw_pwr_gk20a.h" 33#include "hw_pwr_gk20a.h"
33#include "hw_top_gk20a.h" 34#include "hw_top_gk20a.h"
@@ -40,8 +41,7 @@
40static void pmu_dump_falcon_stats(struct pmu_gk20a *pmu); 41static void pmu_dump_falcon_stats(struct pmu_gk20a *pmu);
41static int gk20a_pmu_get_elpg_residency_gating(struct gk20a *g, 42static int gk20a_pmu_get_elpg_residency_gating(struct gk20a *g,
42 u32 *ingating_time, u32 *ungating_time, u32 *gating_cnt); 43 u32 *ingating_time, u32 *ungating_time, u32 *gating_cnt);
43static void gk20a_init_pmu_setup_hw2_workqueue(struct work_struct *work); 44static void pmu_setup_hw(struct work_struct *work);
44static void pmu_save_zbc(struct gk20a *g, u32 entries);
45static void ap_callback_init_and_enable_ctrl( 45static void ap_callback_init_and_enable_ctrl(
46 struct gk20a *g, struct pmu_msg *msg, 46 struct gk20a *g, struct pmu_msg *msg,
47 void *param, u32 seq_desc, u32 status); 47 void *param, u32 seq_desc, u32 status);
@@ -1582,8 +1582,7 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
1582 pmu->ucode_image = (u32 *)((u8 *)pmu->desc + 1582 pmu->ucode_image = (u32 *)((u8 *)pmu->desc +
1583 pmu->desc->descriptor_size); 1583 pmu->desc->descriptor_size);
1584 1584
1585 1585 INIT_WORK(&pmu->pg_init, pmu_setup_hw);
1586 INIT_WORK(&pmu->pg_init, gk20a_init_pmu_setup_hw2_workqueue);
1587 1586
1588 dma_set_attr(DMA_ATTR_READ_ONLY, &attrs); 1587 dma_set_attr(DMA_ATTR_READ_ONLY, &attrs);
1589 pmu->ucode.cpuva = dma_alloc_attrs(d, GK20A_PMU_UCODE_SIZE_MAX, 1588 pmu->ucode.cpuva = dma_alloc_attrs(d, GK20A_PMU_UCODE_SIZE_MAX,
@@ -1607,7 +1606,6 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
1607 } 1606 }
1608 1607
1609 pmu->seq_buf.iova = iova; 1608 pmu->seq_buf.iova = iova;
1610 init_waitqueue_head(&pmu->pg_wq);
1611 1609
1612 err = gk20a_get_sgtable(d, &sgt_pmu_ucode, 1610 err = gk20a_get_sgtable(d, &sgt_pmu_ucode,
1613 pmu->ucode.cpuva, 1611 pmu->ucode.cpuva,
@@ -1668,6 +1666,8 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
1668 gk20a_free_sgtable(&sgt_pmu_ucode); 1666 gk20a_free_sgtable(&sgt_pmu_ucode);
1669 gk20a_free_sgtable(&sgt_seq_buf); 1667 gk20a_free_sgtable(&sgt_seq_buf);
1670 1668
1669 pmu->sw_ready = true;
1670
1671skip_init: 1671skip_init:
1672 mutex_init(&pmu->elpg_mutex); 1672 mutex_init(&pmu->elpg_mutex);
1673 mutex_init(&pmu->isr_mutex); 1673 mutex_init(&pmu->isr_mutex);
@@ -1741,7 +1741,7 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
1741 } 1741 }
1742 1742
1743 pmu->buf_loaded = (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_LOADED); 1743 pmu->buf_loaded = (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_LOADED);
1744 wake_up(&pmu->pg_wq); 1744 schedule_work(&pmu->pg_init);
1745} 1745}
1746 1746
1747int gk20a_init_pmu_setup_hw1(struct gk20a *g) 1747int gk20a_init_pmu_setup_hw1(struct gk20a *g)
@@ -1781,15 +1781,37 @@ int gk20a_init_pmu_setup_hw1(struct gk20a *g)
1781static int gk20a_aelpg_init(struct gk20a *g); 1781static int gk20a_aelpg_init(struct gk20a *g);
1782static int gk20a_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id); 1782static int gk20a_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id);
1783 1783
1784static void pmu_setup_hw_load_zbc(struct gk20a *g);
1785static void pmu_setup_hw_enable_elpg(struct gk20a *g);
1784 1786
1785static void gk20a_init_pmu_setup_hw2_workqueue(struct work_struct *work) 1787static void pmu_setup_hw(struct work_struct *work)
1786{ 1788{
1787 struct pmu_gk20a *pmu = container_of(work, struct pmu_gk20a, pg_init); 1789 struct pmu_gk20a *pmu = container_of(work, struct pmu_gk20a, pg_init);
1788 struct gk20a *g = pmu->g; 1790 struct gk20a *g = pmu->g;
1789 gk20a_init_pmu_setup_hw2(g); 1791
1792 switch (pmu->pmu_state) {
1793 case PMU_STATE_ELPG_BOOTED:
1794 gk20a_dbg_pmu("elpg booted");
1795 gk20a_init_pmu_bind_fecs(g);
1796 break;
1797 case PMU_STATE_LOADING_PG_BUF:
1798 gk20a_dbg_pmu("loaded pg buf");
1799 pmu_setup_hw_load_zbc(g);
1800 break;
1801 case PMU_STATE_LOADING_ZBC:
1802 gk20a_dbg_pmu("loaded zbc");
1803 pmu_setup_hw_enable_elpg(g);
1804 break;
1805 case PMU_STATE_STARTED:
1806 gk20a_dbg_pmu("PMU booted");
1807 break;
1808 default:
1809 gk20a_dbg_pmu("invalid state");
1810 break;
1811 }
1790} 1812}
1791 1813
1792int gk20a_init_pmu_setup_hw2(struct gk20a *g) 1814int gk20a_init_pmu_bind_fecs(struct gk20a *g)
1793{ 1815{
1794 struct pmu_gk20a *pmu = &g->pmu; 1816 struct pmu_gk20a *pmu = &g->pmu;
1795 struct mm_gk20a *mm = &g->mm; 1817 struct mm_gk20a *mm = &g->mm;
@@ -1797,19 +1819,15 @@ int gk20a_init_pmu_setup_hw2(struct gk20a *g)
1797 struct device *d = dev_from_gk20a(g); 1819 struct device *d = dev_from_gk20a(g);
1798 struct pmu_cmd cmd; 1820 struct pmu_cmd cmd;
1799 u32 desc; 1821 u32 desc;
1800 long remain;
1801 int err; 1822 int err;
1802 bool status;
1803 u32 size; 1823 u32 size;
1804 struct sg_table *sgt_pg_buf; 1824 struct sg_table *sgt_pg_buf;
1805 dma_addr_t iova; 1825 dma_addr_t iova;
1806 1826
1807 gk20a_dbg_fn(""); 1827 gk20a_dbg_fn("");
1808 1828
1809 if (!support_gk20a_pmu())
1810 return 0;
1811
1812 size = 0; 1829 size = 0;
1830 gk20a_gr_wait_initialized(g);
1813 err = gr_gk20a_fecs_get_reglist_img_size(g, &size); 1831 err = gr_gk20a_fecs_get_reglist_img_size(g, &size);
1814 if (err) { 1832 if (err) {
1815 gk20a_err(dev_from_gk20a(g), 1833 gk20a_err(dev_from_gk20a(g),
@@ -1817,14 +1835,13 @@ int gk20a_init_pmu_setup_hw2(struct gk20a *g)
1817 return err; 1835 return err;
1818 } 1836 }
1819 1837
1820 if (!pmu->sw_ready) { 1838 if (!pmu->pg_buf.cpuva) {
1821 pmu->pg_buf.cpuva = dma_alloc_coherent(d, size, 1839 pmu->pg_buf.cpuva = dma_alloc_coherent(d, size,
1822 &iova, 1840 &iova,
1823 GFP_KERNEL); 1841 GFP_KERNEL);
1824 if (!pmu->pg_buf.cpuva) { 1842 if (!pmu->pg_buf.cpuva) {
1825 gk20a_err(d, "failed to allocate memory\n"); 1843 gk20a_err(d, "failed to allocate memory\n");
1826 err = -ENOMEM; 1844 return -ENOMEM;
1827 goto err;
1828 } 1845 }
1829 1846
1830 pmu->pg_buf.iova = iova; 1847 pmu->pg_buf.iova = iova;
@@ -1853,30 +1870,6 @@ int gk20a_init_pmu_setup_hw2(struct gk20a *g)
1853 gk20a_free_sgtable(&sgt_pg_buf); 1870 gk20a_free_sgtable(&sgt_pg_buf);
1854 } 1871 }
1855 1872
1856 /*
1857 * This is the actual point at which sw setup is complete, so set the
1858 * sw_ready flag here.
1859 */
1860 pmu->sw_ready = true;
1861
1862 /* TBD: acquire pmu hw mutex */
1863
1864 /* TBD: post reset again? */
1865
1866 /* PMU_INIT message handler will send PG_INIT */
1867 remain = wait_event_timeout(
1868 pmu->pg_wq,
1869 (status = (pmu->elpg_ready &&
1870 pmu->stat_dmem_offset != 0 &&
1871 pmu->elpg_stat == PMU_ELPG_STAT_OFF)),
1872 msecs_to_jiffies(gk20a_get_gr_idle_timeout(g)));
1873 if (status == 0) {
1874 gk20a_err(dev_from_gk20a(g),
1875 "PG_INIT_ACK failed, remaining timeout : 0x%lx", remain);
1876 pmu_dump_falcon_stats(pmu);
1877 return -EBUSY;
1878 }
1879
1880 err = gr_gk20a_fecs_set_reglist_bind_inst(g, mm->pmu.inst_block.cpu_pa); 1873 err = gr_gk20a_fecs_set_reglist_bind_inst(g, mm->pmu.inst_block.cpu_pa);
1881 if (err) { 1874 if (err) {
1882 gk20a_err(dev_from_gk20a(g), 1875 gk20a_err(dev_from_gk20a(g),
@@ -1906,17 +1899,24 @@ int gk20a_init_pmu_setup_hw2(struct gk20a *g)
1906 gk20a_dbg_pmu("cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS"); 1899 gk20a_dbg_pmu("cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
1907 gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ, 1900 gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
1908 pmu_handle_pg_buf_config_msg, pmu, &desc, ~0); 1901 pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
1902 pmu->pmu_state = PMU_STATE_LOADING_PG_BUF;
1903 return err;
1909 1904
1910 remain = wait_event_timeout( 1905err_free_sgtable:
1911 pmu->pg_wq, 1906 gk20a_free_sgtable(&sgt_pg_buf);
1912 pmu->buf_loaded, 1907err_free_pg_buf:
1913 msecs_to_jiffies(gk20a_get_gr_idle_timeout(g))); 1908 dma_free_coherent(d, size,
1914 if (!pmu->buf_loaded) { 1909 pmu->pg_buf.cpuva, pmu->pg_buf.iova);
1915 gk20a_err(dev_from_gk20a(g), 1910 pmu->pg_buf.cpuva = NULL;
1916 "PGENG FECS buffer load failed, remaining timeout : 0x%lx", 1911 pmu->pg_buf.iova = 0;
1917 remain); 1912 return err;
1918 return -EBUSY; 1913}
1919 } 1914
1915static void pmu_setup_hw_load_zbc(struct gk20a *g)
1916{
1917 struct pmu_gk20a *pmu = &g->pmu;
1918 struct pmu_cmd cmd;
1919 u32 desc;
1920 1920
1921 memset(&cmd, 0, sizeof(struct pmu_cmd)); 1921 memset(&cmd, 0, sizeof(struct pmu_cmd));
1922 cmd.hdr.unit_id = PMU_UNIT_PG; 1922 cmd.hdr.unit_id = PMU_UNIT_PG;
@@ -1933,17 +1933,12 @@ int gk20a_init_pmu_setup_hw2(struct gk20a *g)
1933 gk20a_dbg_pmu("cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_ZBC"); 1933 gk20a_dbg_pmu("cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_ZBC");
1934 gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ, 1934 gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
1935 pmu_handle_pg_buf_config_msg, pmu, &desc, ~0); 1935 pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
1936 pmu->pmu_state = PMU_STATE_LOADING_ZBC;
1937}
1936 1938
1937 remain = wait_event_timeout( 1939static void pmu_setup_hw_enable_elpg(struct gk20a *g)
1938 pmu->pg_wq, 1940{
1939 pmu->buf_loaded, 1941 struct pmu_gk20a *pmu = &g->pmu;
1940 msecs_to_jiffies(gk20a_get_gr_idle_timeout(g)));
1941 if (!pmu->buf_loaded) {
1942 gk20a_err(dev_from_gk20a(g),
1943 "PGENG ZBC buffer load failed, remaining timeout 0x%lx",
1944 remain);
1945 return -EBUSY;
1946 }
1947 1942
1948 /* 1943 /*
1949 * FIXME: To enable ELPG, we increase the PMU ext2priv timeout unit to 1944 * FIXME: To enable ELPG, we increase the PMU ext2priv timeout unit to
@@ -1954,17 +1949,11 @@ int gk20a_init_pmu_setup_hw2(struct gk20a *g)
1954 gk20a_writel(g, 0x10a164, 0x109ff); 1949 gk20a_writel(g, 0x10a164, 0x109ff);
1955 1950
1956 pmu->initialized = true; 1951 pmu->initialized = true;
1957 1952 pmu->pmu_state = PMU_STATE_STARTED;
1958 /*
1959 * We can't guarantee that gr code to enable ELPG will be
1960 * invoked, so we explicitly call disable-enable here
1961 * to enable elpg.
1962 */
1963 gk20a_pmu_disable_elpg(g);
1964 1953
1965 pmu->zbc_ready = true; 1954 pmu->zbc_ready = true;
1966 /* Save zbc table after PMU is initialized. */ 1955 /* Save zbc table after PMU is initialized. */
1967 pmu_save_zbc(g, 0xf); 1956 gr_gk20a_pmu_save_zbc(g, 0xf);
1968 1957
1969 if (g->elpg_enabled) 1958 if (g->elpg_enabled)
1970 gk20a_pmu_enable_elpg(g); 1959 gk20a_pmu_enable_elpg(g);
@@ -1976,18 +1965,6 @@ int gk20a_init_pmu_setup_hw2(struct gk20a *g)
1976 gk20a_aelpg_init(g); 1965 gk20a_aelpg_init(g);
1977 gk20a_aelpg_init_and_enable(g, PMU_AP_CTRL_ID_GRAPHICS); 1966 gk20a_aelpg_init_and_enable(g, PMU_AP_CTRL_ID_GRAPHICS);
1978 } 1967 }
1979
1980 return 0;
1981
1982 err_free_sgtable:
1983 gk20a_free_sgtable(&sgt_pg_buf);
1984 err_free_pg_buf:
1985 dma_free_coherent(d, size,
1986 pmu->pg_buf.cpuva, pmu->pg_buf.iova);
1987 pmu->pg_buf.cpuva = NULL;
1988 pmu->pg_buf.iova = 0;
1989 err:
1990 return err;
1991} 1968}
1992 1969
1993int gk20a_init_pmu_support(struct gk20a *g) 1970int gk20a_init_pmu_support(struct gk20a *g)
@@ -2036,18 +2013,17 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
2036 switch (elpg_msg->msg) { 2013 switch (elpg_msg->msg) {
2037 case PMU_PG_ELPG_MSG_INIT_ACK: 2014 case PMU_PG_ELPG_MSG_INIT_ACK:
2038 gk20a_dbg_pmu("INIT_PG is acknowledged from PMU"); 2015 gk20a_dbg_pmu("INIT_PG is acknowledged from PMU");
2039 pmu->elpg_ready = true;
2040 wake_up(&pmu->pg_wq);
2041 break; 2016 break;
2042 case PMU_PG_ELPG_MSG_ALLOW_ACK: 2017 case PMU_PG_ELPG_MSG_ALLOW_ACK:
2043 gk20a_dbg_pmu("ALLOW is acknowledged from PMU"); 2018 gk20a_dbg_pmu("ALLOW is acknowledged from PMU");
2044 pmu->elpg_stat = PMU_ELPG_STAT_ON; 2019 pmu->elpg_stat = PMU_ELPG_STAT_ON;
2045 wake_up(&pmu->pg_wq);
2046 break; 2020 break;
2047 case PMU_PG_ELPG_MSG_DISALLOW_ACK: 2021 case PMU_PG_ELPG_MSG_DISALLOW_ACK:
2048 gk20a_dbg_pmu("DISALLOW is acknowledged from PMU"); 2022 gk20a_dbg_pmu("DISALLOW is acknowledged from PMU");
2049 pmu->elpg_stat = PMU_ELPG_STAT_OFF; 2023 pmu->elpg_stat = PMU_ELPG_STAT_OFF;
2050 wake_up(&pmu->pg_wq); 2024 if (pmu->pmu_state == PMU_STATE_STARTING)
2025 pmu->pmu_state = PMU_STATE_ELPG_BOOTED;
2026 schedule_work(&pmu->pg_init);
2051 break; 2027 break;
2052 default: 2028 default:
2053 gk20a_err(dev_from_gk20a(g), 2029 gk20a_err(dev_from_gk20a(g),
@@ -2074,7 +2050,6 @@ static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg,
2074 case PMU_PG_STAT_MSG_RESP_DMEM_OFFSET: 2050 case PMU_PG_STAT_MSG_RESP_DMEM_OFFSET:
2075 gk20a_dbg_pmu("ALLOC_DMEM_OFFSET is acknowledged from PMU"); 2051 gk20a_dbg_pmu("ALLOC_DMEM_OFFSET is acknowledged from PMU");
2076 pmu->stat_dmem_offset = msg->msg.pg.stat.data; 2052 pmu->stat_dmem_offset = msg->msg.pg.stat.data;
2077 wake_up(&pmu->pg_wq);
2078 break; 2053 break;
2079 default: 2054 default:
2080 break; 2055 break;
@@ -2131,7 +2106,7 @@ static int pmu_init_powergating(struct pmu_gk20a *pmu)
2131 2106
2132 /* disallow ELPG initially 2107 /* disallow ELPG initially
2133 PMU ucode requires a disallow cmd before allow cmd */ 2108 PMU ucode requires a disallow cmd before allow cmd */
2134 pmu->elpg_stat = PMU_ELPG_STAT_ON; /* set for wait_event PMU_ELPG_STAT_OFF */ 2109 pmu->elpg_stat = PMU_ELPG_STAT_OFF; /* set for wait_event PMU_ELPG_STAT_OFF */
2135 memset(&cmd, 0, sizeof(struct pmu_cmd)); 2110 memset(&cmd, 0, sizeof(struct pmu_cmd));
2136 cmd.hdr.unit_id = PMU_UNIT_PG; 2111 cmd.hdr.unit_id = PMU_UNIT_PG;
2137 cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_elpg_cmd); 2112 cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_elpg_cmd);
@@ -2144,7 +2119,9 @@ static int pmu_init_powergating(struct pmu_gk20a *pmu)
2144 pmu_handle_pg_elpg_msg, pmu, &seq, ~0); 2119 pmu_handle_pg_elpg_msg, pmu, &seq, ~0);
2145 2120
2146 /* start with elpg disabled until first enable call */ 2121 /* start with elpg disabled until first enable call */
2147 pmu->elpg_refcnt = 1; 2122 pmu->elpg_refcnt = 0;
2123
2124 pmu->pmu_state = PMU_STATE_STARTING;
2148 2125
2149 return 0; 2126 return 0;
2150} 2127}
@@ -2482,7 +2459,7 @@ static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg,
2482 pmu->zbc_save_done = 1; 2459 pmu->zbc_save_done = 1;
2483} 2460}
2484 2461
2485static void pmu_save_zbc(struct gk20a *g, u32 entries) 2462void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
2486{ 2463{
2487 struct pmu_gk20a *pmu = &g->pmu; 2464 struct pmu_gk20a *pmu = &g->pmu;
2488 struct pmu_cmd cmd; 2465 struct pmu_cmd cmd;
@@ -2508,12 +2485,6 @@ static void pmu_save_zbc(struct gk20a *g, u32 entries)
2508 gk20a_err(dev_from_gk20a(g), "ZBC save timeout"); 2485 gk20a_err(dev_from_gk20a(g), "ZBC save timeout");
2509} 2486}
2510 2487
2511void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
2512{
2513 if (g->pmu.zbc_ready)
2514 pmu_save_zbc(g, entries);
2515}
2516
2517static int pmu_perfmon_start_sampling(struct pmu_gk20a *pmu) 2488static int pmu_perfmon_start_sampling(struct pmu_gk20a *pmu)
2518{ 2489{
2519 struct gk20a *g = pmu->g; 2490 struct gk20a *g = pmu->g;
@@ -3195,9 +3166,6 @@ int gk20a_pmu_enable_elpg(struct gk20a *g)
3195 3166
3196 gk20a_dbg_fn(""); 3167 gk20a_dbg_fn("");
3197 3168
3198 if (!pmu->elpg_ready || !pmu->initialized)
3199 goto exit;
3200
3201 mutex_lock(&pmu->elpg_mutex); 3169 mutex_lock(&pmu->elpg_mutex);
3202 3170
3203 pmu->elpg_refcnt++; 3171 pmu->elpg_refcnt++;
@@ -3225,7 +3193,6 @@ int gk20a_pmu_enable_elpg(struct gk20a *g)
3225 3193
3226exit_unlock: 3194exit_unlock:
3227 mutex_unlock(&pmu->elpg_mutex); 3195 mutex_unlock(&pmu->elpg_mutex);
3228exit:
3229 gk20a_dbg_fn("done"); 3196 gk20a_dbg_fn("done");
3230 return ret; 3197 return ret;
3231} 3198}
@@ -3239,9 +3206,6 @@ int gk20a_pmu_disable_elpg(struct gk20a *g)
3239 3206
3240 gk20a_dbg_fn(""); 3207 gk20a_dbg_fn("");
3241 3208
3242 if (!pmu->elpg_ready || !pmu->initialized)
3243 return 0;
3244
3245 mutex_lock(&pmu->elpg_mutex); 3209 mutex_lock(&pmu->elpg_mutex);
3246 3210
3247 pmu->elpg_refcnt--; 3211 pmu->elpg_refcnt--;
@@ -3353,10 +3317,10 @@ int gk20a_pmu_destroy(struct gk20a *g)
3353 g->pg_gating_cnt += gating_cnt; 3317 g->pg_gating_cnt += gating_cnt;
3354 3318
3355 pmu_enable(pmu, false); 3319 pmu_enable(pmu, false);
3320 pmu->pmu_state = PMU_STATE_OFF;
3356 pmu->pmu_ready = false; 3321 pmu->pmu_ready = false;
3357 pmu->perfmon_ready = false; 3322 pmu->perfmon_ready = false;
3358 pmu->zbc_ready = false; 3323 pmu->zbc_ready = false;
3359 pmu->elpg_ready = false;
3360 3324
3361 gk20a_dbg_fn("done"); 3325 gk20a_dbg_fn("done");
3362 return 0; 3326 return 0;