diff options
author | Deepak Nibade <dnibade@nvidia.com> | 2017-01-24 08:30:42 -0500 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-02-22 07:15:02 -0500 |
commit | 8ee3aa4b3175d8d27e57a0f5d5e2cdf3d78a4a58 (patch) | |
tree | 505dfd2ea2aca2f1cbdb254baee980862d21e04d /drivers/gpu/nvgpu/gm20b/clk_gm20b.c | |
parent | 1f855af63fdd31fe3dcfee75f4f5f9b62f30d87e (diff) |
gpu: nvgpu: use common nvgpu mutex/spinlock APIs
Instead of using Linux APIs for mutex and spinlocks
directly, use new APIs defined in <nvgpu/lock.h>
Replace Linux specific mutex/spinlock declaration,
init, lock, unlock APIs with new APIs
e.g.
struct mutex is replaced by struct nvgpu_mutex and
mutex_lock() is replaced by nvgpu_mutex_acquire()
And also include <nvgpu/lock.h> instead of including
<linux/mutex.h> and <linux/spinlock.h>
Add explicit nvgpu/lock.h includes to below
files to fix compilation failures.
gk20a/platform_gk20a.h
include/nvgpu/allocator.h
Jira NVGPU-13
Change-Id: I81a05d21ecdbd90c2076a9f0aefd0e40b215bd33
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1293187
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/clk_gm20b.c')
-rw-r--r-- | drivers/gpu/nvgpu/gm20b/clk_gm20b.c | 72 |
1 files changed, 36 insertions, 36 deletions
diff --git a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c index 8db4944e..fc352151 100644 --- a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c | |||
@@ -1191,7 +1191,7 @@ static int gm20b_init_clk_setup_sw(struct gk20a *g) | |||
1191 | } | 1191 | } |
1192 | #endif | 1192 | #endif |
1193 | 1193 | ||
1194 | mutex_init(&clk->clk_mutex); | 1194 | nvgpu_mutex_init(&clk->clk_mutex); |
1195 | 1195 | ||
1196 | clk->sw_ready = true; | 1196 | clk->sw_ready = true; |
1197 | 1197 | ||
@@ -1212,10 +1212,10 @@ static int gm20b_clk_prepare(struct clk_hw *hw) | |||
1212 | struct clk_gk20a *clk = to_clk_gk20a(hw); | 1212 | struct clk_gk20a *clk = to_clk_gk20a(hw); |
1213 | int ret = 0; | 1213 | int ret = 0; |
1214 | 1214 | ||
1215 | mutex_lock(&clk->clk_mutex); | 1215 | nvgpu_mutex_acquire(&clk->clk_mutex); |
1216 | if (!clk->gpc_pll.enabled && clk->clk_hw_on) | 1216 | if (!clk->gpc_pll.enabled && clk->clk_hw_on) |
1217 | ret = set_pll_freq(clk->g, 1); | 1217 | ret = set_pll_freq(clk->g, 1); |
1218 | mutex_unlock(&clk->clk_mutex); | 1218 | nvgpu_mutex_release(&clk->clk_mutex); |
1219 | return ret; | 1219 | return ret; |
1220 | } | 1220 | } |
1221 | 1221 | ||
@@ -1223,10 +1223,10 @@ static void gm20b_clk_unprepare(struct clk_hw *hw) | |||
1223 | { | 1223 | { |
1224 | struct clk_gk20a *clk = to_clk_gk20a(hw); | 1224 | struct clk_gk20a *clk = to_clk_gk20a(hw); |
1225 | 1225 | ||
1226 | mutex_lock(&clk->clk_mutex); | 1226 | nvgpu_mutex_acquire(&clk->clk_mutex); |
1227 | if (clk->gpc_pll.enabled && clk->clk_hw_on) | 1227 | if (clk->gpc_pll.enabled && clk->clk_hw_on) |
1228 | clk_disable_gpcpll(clk->g, 1); | 1228 | clk_disable_gpcpll(clk->g, 1); |
1229 | mutex_unlock(&clk->clk_mutex); | 1229 | nvgpu_mutex_release(&clk->clk_mutex); |
1230 | } | 1230 | } |
1231 | 1231 | ||
1232 | static int gm20b_clk_is_prepared(struct clk_hw *hw) | 1232 | static int gm20b_clk_is_prepared(struct clk_hw *hw) |
@@ -1250,12 +1250,12 @@ static int gm20b_gpcclk_set_rate(struct clk_hw *hw, unsigned long rate, | |||
1250 | u32 old_freq; | 1250 | u32 old_freq; |
1251 | int ret = -ENODATA; | 1251 | int ret = -ENODATA; |
1252 | 1252 | ||
1253 | mutex_lock(&clk->clk_mutex); | 1253 | nvgpu_mutex_acquire(&clk->clk_mutex); |
1254 | old_freq = clk->gpc_pll.freq; | 1254 | old_freq = clk->gpc_pll.freq; |
1255 | ret = set_pll_target(clk->g, rate_gpu_to_gpc2clk(rate), old_freq); | 1255 | ret = set_pll_target(clk->g, rate_gpu_to_gpc2clk(rate), old_freq); |
1256 | if (!ret && clk->gpc_pll.enabled && clk->clk_hw_on) | 1256 | if (!ret && clk->gpc_pll.enabled && clk->clk_hw_on) |
1257 | ret = set_pll_freq(clk->g, 1); | 1257 | ret = set_pll_freq(clk->g, 1); |
1258 | mutex_unlock(&clk->clk_mutex); | 1258 | nvgpu_mutex_release(&clk->clk_mutex); |
1259 | 1259 | ||
1260 | return ret; | 1260 | return ret; |
1261 | } | 1261 | } |
@@ -1272,7 +1272,7 @@ static long gm20b_round_rate(struct clk_hw *hw, unsigned long rate, | |||
1272 | if (rate > maxrate) | 1272 | if (rate > maxrate) |
1273 | rate = maxrate; | 1273 | rate = maxrate; |
1274 | 1274 | ||
1275 | mutex_lock(&clk->clk_mutex); | 1275 | nvgpu_mutex_acquire(&clk->clk_mutex); |
1276 | freq = rate_gpu_to_gpc2clk(rate); | 1276 | freq = rate_gpu_to_gpc2clk(rate); |
1277 | if (freq > gpc_pll_params.max_freq) | 1277 | if (freq > gpc_pll_params.max_freq) |
1278 | freq = gpc_pll_params.max_freq; | 1278 | freq = gpc_pll_params.max_freq; |
@@ -1281,7 +1281,7 @@ static long gm20b_round_rate(struct clk_hw *hw, unsigned long rate, | |||
1281 | 1281 | ||
1282 | tmp_pll = clk->gpc_pll; | 1282 | tmp_pll = clk->gpc_pll; |
1283 | clk_config_pll(clk, &tmp_pll, &gpc_pll_params, &freq, true); | 1283 | clk_config_pll(clk, &tmp_pll, &gpc_pll_params, &freq, true); |
1284 | mutex_unlock(&clk->clk_mutex); | 1284 | nvgpu_mutex_release(&clk->clk_mutex); |
1285 | 1285 | ||
1286 | return rate_gpc2clk_to_gpu(tmp_pll.freq); | 1286 | return rate_gpc2clk_to_gpu(tmp_pll.freq); |
1287 | } | 1287 | } |
@@ -1445,14 +1445,14 @@ static int gm20b_clk_export_set_rate(void *data, unsigned long *rate) | |||
1445 | struct clk_gk20a *clk = &g->clk; | 1445 | struct clk_gk20a *clk = &g->clk; |
1446 | 1446 | ||
1447 | if (rate) { | 1447 | if (rate) { |
1448 | mutex_lock(&clk->clk_mutex); | 1448 | nvgpu_mutex_acquire(&clk->clk_mutex); |
1449 | old_freq = clk->gpc_pll.freq; | 1449 | old_freq = clk->gpc_pll.freq; |
1450 | ret = set_pll_target(g, rate_gpu_to_gpc2clk(*rate), old_freq); | 1450 | ret = set_pll_target(g, rate_gpu_to_gpc2clk(*rate), old_freq); |
1451 | if (!ret && clk->gpc_pll.enabled && clk->clk_hw_on) | 1451 | if (!ret && clk->gpc_pll.enabled && clk->clk_hw_on) |
1452 | ret = set_pll_freq(g, 1); | 1452 | ret = set_pll_freq(g, 1); |
1453 | if (!ret) | 1453 | if (!ret) |
1454 | *rate = rate_gpc2clk_to_gpu(clk->gpc_pll.freq); | 1454 | *rate = rate_gpc2clk_to_gpu(clk->gpc_pll.freq); |
1455 | mutex_unlock(&clk->clk_mutex); | 1455 | nvgpu_mutex_release(&clk->clk_mutex); |
1456 | } | 1456 | } |
1457 | return ret; | 1457 | return ret; |
1458 | } | 1458 | } |
@@ -1463,10 +1463,10 @@ static int gm20b_clk_export_enable(void *data) | |||
1463 | struct gk20a *g = data; | 1463 | struct gk20a *g = data; |
1464 | struct clk_gk20a *clk = &g->clk; | 1464 | struct clk_gk20a *clk = &g->clk; |
1465 | 1465 | ||
1466 | mutex_lock(&clk->clk_mutex); | 1466 | nvgpu_mutex_acquire(&clk->clk_mutex); |
1467 | if (!clk->gpc_pll.enabled && clk->clk_hw_on) | 1467 | if (!clk->gpc_pll.enabled && clk->clk_hw_on) |
1468 | ret = set_pll_freq(g, 1); | 1468 | ret = set_pll_freq(g, 1); |
1469 | mutex_unlock(&clk->clk_mutex); | 1469 | nvgpu_mutex_release(&clk->clk_mutex); |
1470 | return ret; | 1470 | return ret; |
1471 | } | 1471 | } |
1472 | 1472 | ||
@@ -1475,10 +1475,10 @@ static void gm20b_clk_export_disable(void *data) | |||
1475 | struct gk20a *g = data; | 1475 | struct gk20a *g = data; |
1476 | struct clk_gk20a *clk = &g->clk; | 1476 | struct clk_gk20a *clk = &g->clk; |
1477 | 1477 | ||
1478 | mutex_lock(&clk->clk_mutex); | 1478 | nvgpu_mutex_acquire(&clk->clk_mutex); |
1479 | if (clk->gpc_pll.enabled && clk->clk_hw_on) | 1479 | if (clk->gpc_pll.enabled && clk->clk_hw_on) |
1480 | clk_disable_gpcpll(g, 1); | 1480 | clk_disable_gpcpll(g, 1); |
1481 | mutex_unlock(&clk->clk_mutex); | 1481 | nvgpu_mutex_release(&clk->clk_mutex); |
1482 | } | 1482 | } |
1483 | 1483 | ||
1484 | static void gm20b_clk_export_init(void *data, unsigned long *rate, bool *state) | 1484 | static void gm20b_clk_export_init(void *data, unsigned long *rate, bool *state) |
@@ -1486,12 +1486,12 @@ static void gm20b_clk_export_init(void *data, unsigned long *rate, bool *state) | |||
1486 | struct gk20a *g = data; | 1486 | struct gk20a *g = data; |
1487 | struct clk_gk20a *clk = &g->clk; | 1487 | struct clk_gk20a *clk = &g->clk; |
1488 | 1488 | ||
1489 | mutex_lock(&clk->clk_mutex); | 1489 | nvgpu_mutex_acquire(&clk->clk_mutex); |
1490 | if (state) | 1490 | if (state) |
1491 | *state = clk->gpc_pll.enabled; | 1491 | *state = clk->gpc_pll.enabled; |
1492 | if (rate) | 1492 | if (rate) |
1493 | *rate = rate_gpc2clk_to_gpu(clk->gpc_pll.freq); | 1493 | *rate = rate_gpc2clk_to_gpu(clk->gpc_pll.freq); |
1494 | mutex_unlock(&clk->clk_mutex); | 1494 | nvgpu_mutex_release(&clk->clk_mutex); |
1495 | } | 1495 | } |
1496 | 1496 | ||
1497 | static struct tegra_clk_export_ops gm20b_clk_export_ops = { | 1497 | static struct tegra_clk_export_ops gm20b_clk_export_ops = { |
@@ -1539,11 +1539,11 @@ static int gm20b_init_clk_support(struct gk20a *g) | |||
1539 | return err; | 1539 | return err; |
1540 | #endif | 1540 | #endif |
1541 | 1541 | ||
1542 | mutex_lock(&clk->clk_mutex); | 1542 | nvgpu_mutex_acquire(&clk->clk_mutex); |
1543 | clk->clk_hw_on = true; | 1543 | clk->clk_hw_on = true; |
1544 | 1544 | ||
1545 | err = gm20b_init_clk_setup_hw(g); | 1545 | err = gm20b_init_clk_setup_hw(g); |
1546 | mutex_unlock(&clk->clk_mutex); | 1546 | nvgpu_mutex_release(&clk->clk_mutex); |
1547 | if (err) | 1547 | if (err) |
1548 | return err; | 1548 | return err; |
1549 | 1549 | ||
@@ -1559,10 +1559,10 @@ static int gm20b_init_clk_support(struct gk20a *g) | |||
1559 | return err; | 1559 | return err; |
1560 | 1560 | ||
1561 | /* The prev call may not enable PLL if gbus is unbalanced - force it */ | 1561 | /* The prev call may not enable PLL if gbus is unbalanced - force it */ |
1562 | mutex_lock(&clk->clk_mutex); | 1562 | nvgpu_mutex_acquire(&clk->clk_mutex); |
1563 | if (!clk->gpc_pll.enabled) | 1563 | if (!clk->gpc_pll.enabled) |
1564 | err = set_pll_freq(g, 1); | 1564 | err = set_pll_freq(g, 1); |
1565 | mutex_unlock(&clk->clk_mutex); | 1565 | nvgpu_mutex_release(&clk->clk_mutex); |
1566 | if (err) | 1566 | if (err) |
1567 | return err; | 1567 | return err; |
1568 | 1568 | ||
@@ -1582,11 +1582,11 @@ static int gm20b_suspend_clk_support(struct gk20a *g) | |||
1582 | clk_disable_unprepare(g->clk.tegra_clk); | 1582 | clk_disable_unprepare(g->clk.tegra_clk); |
1583 | 1583 | ||
1584 | /* The prev call may not disable PLL if gbus is unbalanced - force it */ | 1584 | /* The prev call may not disable PLL if gbus is unbalanced - force it */ |
1585 | mutex_lock(&g->clk.clk_mutex); | 1585 | nvgpu_mutex_acquire(&g->clk.clk_mutex); |
1586 | if (g->clk.gpc_pll.enabled) | 1586 | if (g->clk.gpc_pll.enabled) |
1587 | ret = clk_disable_gpcpll(g, 1); | 1587 | ret = clk_disable_gpcpll(g, 1); |
1588 | g->clk.clk_hw_on = false; | 1588 | g->clk.clk_hw_on = false; |
1589 | mutex_unlock(&g->clk.clk_mutex); | 1589 | nvgpu_mutex_release(&g->clk.clk_mutex); |
1590 | return ret; | 1590 | return ret; |
1591 | } | 1591 | } |
1592 | 1592 | ||
@@ -1616,11 +1616,11 @@ static int pll_reg_show(struct seq_file *s, void *data) | |||
1616 | struct gk20a *g = s->private; | 1616 | struct gk20a *g = s->private; |
1617 | u32 reg, m, n, pl, f; | 1617 | u32 reg, m, n, pl, f; |
1618 | 1618 | ||
1619 | mutex_lock(&g->clk.clk_mutex); | 1619 | nvgpu_mutex_acquire(&g->clk.clk_mutex); |
1620 | if (!g->clk.clk_hw_on) { | 1620 | if (!g->clk.clk_hw_on) { |
1621 | seq_printf(s, "%s powered down - no access to registers\n", | 1621 | seq_printf(s, "%s powered down - no access to registers\n", |
1622 | dev_name(dev_from_gk20a(g))); | 1622 | dev_name(dev_from_gk20a(g))); |
1623 | mutex_unlock(&g->clk.clk_mutex); | 1623 | nvgpu_mutex_release(&g->clk.clk_mutex); |
1624 | return 0; | 1624 | return 0; |
1625 | } | 1625 | } |
1626 | 1626 | ||
@@ -1642,7 +1642,7 @@ static int pll_reg_show(struct seq_file *s, void *data) | |||
1642 | f = g->clk.gpc_pll.clk_in * n / (m * pl_to_div(pl)); | 1642 | f = g->clk.gpc_pll.clk_in * n / (m * pl_to_div(pl)); |
1643 | seq_printf(s, "coef = 0x%x : m = %u : n = %u : pl = %u", reg, m, n, pl); | 1643 | seq_printf(s, "coef = 0x%x : m = %u : n = %u : pl = %u", reg, m, n, pl); |
1644 | seq_printf(s, " : pll_f(gpu_f) = %u(%u) kHz\n", f, f/2); | 1644 | seq_printf(s, " : pll_f(gpu_f) = %u(%u) kHz\n", f, f/2); |
1645 | mutex_unlock(&g->clk.clk_mutex); | 1645 | nvgpu_mutex_release(&g->clk.clk_mutex); |
1646 | return 0; | 1646 | return 0; |
1647 | } | 1647 | } |
1648 | 1648 | ||
@@ -1663,11 +1663,11 @@ static int pll_reg_raw_show(struct seq_file *s, void *data) | |||
1663 | struct gk20a *g = s->private; | 1663 | struct gk20a *g = s->private; |
1664 | u32 reg; | 1664 | u32 reg; |
1665 | 1665 | ||
1666 | mutex_lock(&g->clk.clk_mutex); | 1666 | nvgpu_mutex_acquire(&g->clk.clk_mutex); |
1667 | if (!g->clk.clk_hw_on) { | 1667 | if (!g->clk.clk_hw_on) { |
1668 | seq_printf(s, "%s powered down - no access to registers\n", | 1668 | seq_printf(s, "%s powered down - no access to registers\n", |
1669 | dev_name(dev_from_gk20a(g))); | 1669 | dev_name(dev_from_gk20a(g))); |
1670 | mutex_unlock(&g->clk.clk_mutex); | 1670 | nvgpu_mutex_release(&g->clk.clk_mutex); |
1671 | return 0; | 1671 | return 0; |
1672 | } | 1672 | } |
1673 | 1673 | ||
@@ -1685,7 +1685,7 @@ static int pll_reg_raw_show(struct seq_file *s, void *data) | |||
1685 | reg = trim_sys_bypassctrl_r(); | 1685 | reg = trim_sys_bypassctrl_r(); |
1686 | seq_printf(s, "[0x%02x] = 0x%08x\n", reg, gk20a_readl(g, reg)); | 1686 | seq_printf(s, "[0x%02x] = 0x%08x\n", reg, gk20a_readl(g, reg)); |
1687 | 1687 | ||
1688 | mutex_unlock(&g->clk.clk_mutex); | 1688 | nvgpu_mutex_release(&g->clk.clk_mutex); |
1689 | return 0; | 1689 | return 0; |
1690 | } | 1690 | } |
1691 | 1691 | ||
@@ -1722,13 +1722,13 @@ static ssize_t pll_reg_raw_write(struct file *file, | |||
1722 | (reg != trim_sys_bypassctrl_r())) | 1722 | (reg != trim_sys_bypassctrl_r())) |
1723 | return -EPERM; | 1723 | return -EPERM; |
1724 | 1724 | ||
1725 | mutex_lock(&g->clk.clk_mutex); | 1725 | nvgpu_mutex_acquire(&g->clk.clk_mutex); |
1726 | if (!g->clk.clk_hw_on) { | 1726 | if (!g->clk.clk_hw_on) { |
1727 | mutex_unlock(&g->clk.clk_mutex); | 1727 | nvgpu_mutex_release(&g->clk.clk_mutex); |
1728 | return -EBUSY; | 1728 | return -EBUSY; |
1729 | } | 1729 | } |
1730 | gk20a_writel(g, reg, val); | 1730 | gk20a_writel(g, reg, val); |
1731 | mutex_unlock(&g->clk.clk_mutex); | 1731 | nvgpu_mutex_release(&g->clk.clk_mutex); |
1732 | return count; | 1732 | return count; |
1733 | } | 1733 | } |
1734 | 1734 | ||
@@ -1755,7 +1755,7 @@ static int monitor_get(void *data, u64 *val) | |||
1755 | if (err) | 1755 | if (err) |
1756 | return err; | 1756 | return err; |
1757 | 1757 | ||
1758 | mutex_lock(&g->clk.clk_mutex); | 1758 | nvgpu_mutex_acquire(&g->clk.clk_mutex); |
1759 | 1759 | ||
1760 | /* Disable clock slowdown during measurements */ | 1760 | /* Disable clock slowdown during measurements */ |
1761 | clk_slowdown_save = gk20a_readl(g, therm_clk_slowdown_r(0)); | 1761 | clk_slowdown_save = gk20a_readl(g, therm_clk_slowdown_r(0)); |
@@ -1787,7 +1787,7 @@ static int monitor_get(void *data, u64 *val) | |||
1787 | 1787 | ||
1788 | /* Restore clock slowdown */ | 1788 | /* Restore clock slowdown */ |
1789 | gk20a_writel(g, therm_clk_slowdown_r(0), clk_slowdown_save); | 1789 | gk20a_writel(g, therm_clk_slowdown_r(0), clk_slowdown_save); |
1790 | mutex_unlock(&g->clk.clk_mutex); | 1790 | nvgpu_mutex_release(&g->clk.clk_mutex); |
1791 | 1791 | ||
1792 | gk20a_idle(g->dev); | 1792 | gk20a_idle(g->dev); |
1793 | 1793 | ||
@@ -1811,14 +1811,14 @@ static int voltage_get(void *data, u64 *val) | |||
1811 | if (err) | 1811 | if (err) |
1812 | return err; | 1812 | return err; |
1813 | 1813 | ||
1814 | mutex_lock(&g->clk.clk_mutex); | 1814 | nvgpu_mutex_acquire(&g->clk.clk_mutex); |
1815 | 1815 | ||
1816 | det_out = gk20a_readl(g, trim_sys_gpcpll_cfg3_r()); | 1816 | det_out = gk20a_readl(g, trim_sys_gpcpll_cfg3_r()); |
1817 | det_out = trim_sys_gpcpll_cfg3_dfs_testout_v(det_out); | 1817 | det_out = trim_sys_gpcpll_cfg3_dfs_testout_v(det_out); |
1818 | *val = div64_u64((u64)det_out * gpc_pll_params.uvdet_slope + | 1818 | *val = div64_u64((u64)det_out * gpc_pll_params.uvdet_slope + |
1819 | gpc_pll_params.uvdet_offs, 1000ULL); | 1819 | gpc_pll_params.uvdet_offs, 1000ULL); |
1820 | 1820 | ||
1821 | mutex_unlock(&g->clk.clk_mutex); | 1821 | nvgpu_mutex_release(&g->clk.clk_mutex); |
1822 | 1822 | ||
1823 | gk20a_idle(g->dev); | 1823 | gk20a_idle(g->dev); |
1824 | return 0; | 1824 | return 0; |