Diffstat (limited to 'drivers/gpu/nvgpu/gm20b')
 -rw-r--r--  drivers/gpu/nvgpu/gm20b/acr_gm20b.c | 16
 -rw-r--r--  drivers/gpu/nvgpu/gm20b/clk_gm20b.c | 72
 -rw-r--r--  drivers/gpu/nvgpu/gm20b/clk_gm20b.h |  4
 -rw-r--r--  drivers/gpu/nvgpu/gm20b/gr_gm20b.c  | 12
 -rw-r--r--  drivers/gpu/nvgpu/gm20b/ltc_gm20b.c |  4
 5 files changed, 54 insertions(+), 54 deletions(-)
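
This patch converts the gm20b files from direct Linux mutex calls (mutex_init/mutex_lock/mutex_unlock) to the nvgpu locking wrappers (nvgpu_mutex_init/nvgpu_mutex_acquire/nvgpu_mutex_release), confining the OS-specific primitive behind <nvgpu/lock.h>. For orientation, here is a minimal sketch of what the Linux backing of that header could look like; the struct layout and function bodies are assumptions for illustration, not taken from this diff:

/* Hypothetical sketch of <nvgpu/lock.h> on a Linux build; the real
 * header may differ.  The wrappers are assumed to map one-to-one onto
 * the kernel mutex primitives this patch removes from the callers. */
#include <linux/mutex.h>

struct nvgpu_mutex {
	struct mutex mutex;	/* assumed Linux backing */
};

static inline int nvgpu_mutex_init(struct nvgpu_mutex *mutex)
{
	mutex_init(&mutex->mutex);
	return 0;	/* cannot fail on this backing */
}

static inline void nvgpu_mutex_acquire(struct nvgpu_mutex *mutex)
{
	mutex_lock(&mutex->mutex);
}

static inline void nvgpu_mutex_release(struct nvgpu_mutex *mutex)
{
	mutex_unlock(&mutex->mutex);
}

Under that assumption the conversion is purely mechanical: the diffstat is symmetric (54 insertions, 54 deletions) because every acquire/release replaces one lock/unlock in place, and no locking order changes.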
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index 40a28136..c1cefc29 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -77,10 +77,10 @@ static get_ucode_details pmu_acr_supp_ucode_list[] = {
 static void start_gm20b_pmu(struct gk20a *g)
 {
 	/*disable irqs for hs falcon booting as we will poll for halt*/
-	mutex_lock(&g->pmu.isr_mutex);
+	nvgpu_mutex_acquire(&g->pmu.isr_mutex);
 	pmu_enable_irq(&g->pmu, true);
 	g->pmu.isr_enabled = true;
-	mutex_unlock(&g->pmu.isr_mutex);
+	nvgpu_mutex_release(&g->pmu.isr_mutex);
 	gk20a_writel(g, pwr_falcon_cpuctl_alias_r(),
 		pwr_falcon_cpuctl_startcpu_f(1));
 }
@@ -1282,10 +1282,10 @@ int gm20b_init_nspmu_setup_hw1(struct gk20a *g)
 
 	gk20a_dbg_fn("");
 
-	mutex_lock(&pmu->isr_mutex);
+	nvgpu_mutex_acquire(&pmu->isr_mutex);
 	pmu_reset(pmu);
 	pmu->isr_enabled = true;
-	mutex_unlock(&pmu->isr_mutex);
+	nvgpu_mutex_release(&pmu->isr_mutex);
 
 	/* setup apertures - virtual */
 	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_UCODE),
@@ -1318,10 +1318,10 @@ static int gm20b_init_pmu_setup_hw1(struct gk20a *g,
 
 	gk20a_dbg_fn("");
 
-	mutex_lock(&pmu->isr_mutex);
+	nvgpu_mutex_acquire(&pmu->isr_mutex);
 	g->ops.pmu.reset(g);
 	pmu->isr_enabled = true;
-	mutex_unlock(&pmu->isr_mutex);
+	nvgpu_mutex_release(&pmu->isr_mutex);
 
 	/* setup apertures - virtual */
 	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_UCODE),
@@ -1353,10 +1353,10 @@ static int gm20b_init_pmu_setup_hw1(struct gk20a *g,
 		(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
 		g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);
 	/*disable irqs for hs falcon booting as we will poll for halt*/
-	mutex_lock(&pmu->isr_mutex);
+	nvgpu_mutex_acquire(&pmu->isr_mutex);
 	pmu_enable_irq(pmu, false);
 	pmu->isr_enabled = false;
-	mutex_unlock(&pmu->isr_mutex);
+	nvgpu_mutex_release(&pmu->isr_mutex);
 	/*Clearing mailbox register used to reflect capabilities*/
 	gk20a_writel(g, pwr_falcon_mailbox1_r(), 0);
 	err = bl_bootstrap(pmu, desc, bl_sz);
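
All four hunks above follow one pattern: pmu->isr_mutex couples the hardware IRQ enable (pmu_enable_irq()) to the software flag pmu->isr_enabled, so the two are never observed out of step. The consumer of that flag, the PMU interrupt handler, is not part of this diff; the body below is a hedged sketch of how such a handler would typically use the same lock, with the names (gk20a_pmu_isr, pmu_gk20a) assumed from the surrounding driver rather than quoted from it:

/* Illustrative ISR-side counterpart; not the driver's verbatim code. */
void gk20a_pmu_isr(struct gk20a *g)
{
	struct pmu_gk20a *pmu = &g->pmu;

	nvgpu_mutex_acquire(&pmu->isr_mutex);
	if (!pmu->isr_enabled) {
		/* HS falcon boot in progress: halt is polled, not IRQ driven */
		nvgpu_mutex_release(&pmu->isr_mutex);
		return;
	}
	/* ... read and dispatch the PMU interrupt sources ... */
	nvgpu_mutex_release(&pmu->isr_mutex);
}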
diff --git a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
index 8db4944e..fc352151 100644
--- a/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/clk_gm20b.c
@@ -1191,7 +1191,7 @@ static int gm20b_init_clk_setup_sw(struct gk20a *g)
 	}
 #endif
 
-	mutex_init(&clk->clk_mutex);
+	nvgpu_mutex_init(&clk->clk_mutex);
 
 	clk->sw_ready = true;
 
@@ -1212,10 +1212,10 @@ static int gm20b_clk_prepare(struct clk_hw *hw)
 	struct clk_gk20a *clk = to_clk_gk20a(hw);
 	int ret = 0;
 
-	mutex_lock(&clk->clk_mutex);
+	nvgpu_mutex_acquire(&clk->clk_mutex);
 	if (!clk->gpc_pll.enabled && clk->clk_hw_on)
 		ret = set_pll_freq(clk->g, 1);
-	mutex_unlock(&clk->clk_mutex);
+	nvgpu_mutex_release(&clk->clk_mutex);
 	return ret;
 }
 
@@ -1223,10 +1223,10 @@ static void gm20b_clk_unprepare(struct clk_hw *hw)
 {
 	struct clk_gk20a *clk = to_clk_gk20a(hw);
 
-	mutex_lock(&clk->clk_mutex);
+	nvgpu_mutex_acquire(&clk->clk_mutex);
 	if (clk->gpc_pll.enabled && clk->clk_hw_on)
 		clk_disable_gpcpll(clk->g, 1);
-	mutex_unlock(&clk->clk_mutex);
+	nvgpu_mutex_release(&clk->clk_mutex);
 }
 
 static int gm20b_clk_is_prepared(struct clk_hw *hw)
@@ -1250,12 +1250,12 @@ static int gm20b_gpcclk_set_rate(struct clk_hw *hw, unsigned long rate,
 	u32 old_freq;
 	int ret = -ENODATA;
 
-	mutex_lock(&clk->clk_mutex);
+	nvgpu_mutex_acquire(&clk->clk_mutex);
 	old_freq = clk->gpc_pll.freq;
 	ret = set_pll_target(clk->g, rate_gpu_to_gpc2clk(rate), old_freq);
 	if (!ret && clk->gpc_pll.enabled && clk->clk_hw_on)
 		ret = set_pll_freq(clk->g, 1);
-	mutex_unlock(&clk->clk_mutex);
+	nvgpu_mutex_release(&clk->clk_mutex);
 
 	return ret;
 }
@@ -1272,7 +1272,7 @@ static long gm20b_round_rate(struct clk_hw *hw, unsigned long rate,
 	if (rate > maxrate)
 		rate = maxrate;
 
-	mutex_lock(&clk->clk_mutex);
+	nvgpu_mutex_acquire(&clk->clk_mutex);
 	freq = rate_gpu_to_gpc2clk(rate);
 	if (freq > gpc_pll_params.max_freq)
 		freq = gpc_pll_params.max_freq;
@@ -1281,7 +1281,7 @@ static long gm20b_round_rate(struct clk_hw *hw, unsigned long rate,
 
 	tmp_pll = clk->gpc_pll;
 	clk_config_pll(clk, &tmp_pll, &gpc_pll_params, &freq, true);
-	mutex_unlock(&clk->clk_mutex);
+	nvgpu_mutex_release(&clk->clk_mutex);
 
 	return rate_gpc2clk_to_gpu(tmp_pll.freq);
 }
@@ -1445,14 +1445,14 @@ static int gm20b_clk_export_set_rate(void *data, unsigned long *rate)
 	struct clk_gk20a *clk = &g->clk;
 
 	if (rate) {
-		mutex_lock(&clk->clk_mutex);
+		nvgpu_mutex_acquire(&clk->clk_mutex);
 		old_freq = clk->gpc_pll.freq;
 		ret = set_pll_target(g, rate_gpu_to_gpc2clk(*rate), old_freq);
 		if (!ret && clk->gpc_pll.enabled && clk->clk_hw_on)
 			ret = set_pll_freq(g, 1);
 		if (!ret)
 			*rate = rate_gpc2clk_to_gpu(clk->gpc_pll.freq);
-		mutex_unlock(&clk->clk_mutex);
+		nvgpu_mutex_release(&clk->clk_mutex);
 	}
 	return ret;
 }
@@ -1463,10 +1463,10 @@ static int gm20b_clk_export_enable(void *data)
 	struct gk20a *g = data;
 	struct clk_gk20a *clk = &g->clk;
 
-	mutex_lock(&clk->clk_mutex);
+	nvgpu_mutex_acquire(&clk->clk_mutex);
 	if (!clk->gpc_pll.enabled && clk->clk_hw_on)
 		ret = set_pll_freq(g, 1);
-	mutex_unlock(&clk->clk_mutex);
+	nvgpu_mutex_release(&clk->clk_mutex);
 	return ret;
 }
 
@@ -1475,10 +1475,10 @@ static void gm20b_clk_export_disable(void *data)
 	struct gk20a *g = data;
 	struct clk_gk20a *clk = &g->clk;
 
-	mutex_lock(&clk->clk_mutex);
+	nvgpu_mutex_acquire(&clk->clk_mutex);
 	if (clk->gpc_pll.enabled && clk->clk_hw_on)
 		clk_disable_gpcpll(g, 1);
-	mutex_unlock(&clk->clk_mutex);
+	nvgpu_mutex_release(&clk->clk_mutex);
 }
 
 static void gm20b_clk_export_init(void *data, unsigned long *rate, bool *state)
@@ -1486,12 +1486,12 @@ static void gm20b_clk_export_init(void *data, unsigned long *rate, bool *state)
 	struct gk20a *g = data;
 	struct clk_gk20a *clk = &g->clk;
 
-	mutex_lock(&clk->clk_mutex);
+	nvgpu_mutex_acquire(&clk->clk_mutex);
 	if (state)
 		*state = clk->gpc_pll.enabled;
 	if (rate)
 		*rate = rate_gpc2clk_to_gpu(clk->gpc_pll.freq);
-	mutex_unlock(&clk->clk_mutex);
+	nvgpu_mutex_release(&clk->clk_mutex);
 }
 
 static struct tegra_clk_export_ops gm20b_clk_export_ops = {
@@ -1539,11 +1539,11 @@ static int gm20b_init_clk_support(struct gk20a *g)
 		return err;
 #endif
 
-	mutex_lock(&clk->clk_mutex);
+	nvgpu_mutex_acquire(&clk->clk_mutex);
 	clk->clk_hw_on = true;
 
 	err = gm20b_init_clk_setup_hw(g);
-	mutex_unlock(&clk->clk_mutex);
+	nvgpu_mutex_release(&clk->clk_mutex);
 	if (err)
 		return err;
 
@@ -1559,10 +1559,10 @@ static int gm20b_init_clk_support(struct gk20a *g)
 		return err;
 
 	/* The prev call may not enable PLL if gbus is unbalanced - force it */
-	mutex_lock(&clk->clk_mutex);
+	nvgpu_mutex_acquire(&clk->clk_mutex);
 	if (!clk->gpc_pll.enabled)
 		err = set_pll_freq(g, 1);
-	mutex_unlock(&clk->clk_mutex);
+	nvgpu_mutex_release(&clk->clk_mutex);
 	if (err)
 		return err;
 
@@ -1582,11 +1582,11 @@ static int gm20b_suspend_clk_support(struct gk20a *g)
 	clk_disable_unprepare(g->clk.tegra_clk);
 
 	/* The prev call may not disable PLL if gbus is unbalanced - force it */
-	mutex_lock(&g->clk.clk_mutex);
+	nvgpu_mutex_acquire(&g->clk.clk_mutex);
 	if (g->clk.gpc_pll.enabled)
 		ret = clk_disable_gpcpll(g, 1);
 	g->clk.clk_hw_on = false;
-	mutex_unlock(&g->clk.clk_mutex);
+	nvgpu_mutex_release(&g->clk.clk_mutex);
 	return ret;
 }
 
@@ -1616,11 +1616,11 @@ static int pll_reg_show(struct seq_file *s, void *data)
 	struct gk20a *g = s->private;
 	u32 reg, m, n, pl, f;
 
-	mutex_lock(&g->clk.clk_mutex);
+	nvgpu_mutex_acquire(&g->clk.clk_mutex);
 	if (!g->clk.clk_hw_on) {
 		seq_printf(s, "%s powered down - no access to registers\n",
 			dev_name(dev_from_gk20a(g)));
-		mutex_unlock(&g->clk.clk_mutex);
+		nvgpu_mutex_release(&g->clk.clk_mutex);
 		return 0;
 	}
 
@@ -1642,7 +1642,7 @@ static int pll_reg_show(struct seq_file *s, void *data)
 	f = g->clk.gpc_pll.clk_in * n / (m * pl_to_div(pl));
 	seq_printf(s, "coef = 0x%x : m = %u : n = %u : pl = %u", reg, m, n, pl);
 	seq_printf(s, " : pll_f(gpu_f) = %u(%u) kHz\n", f, f/2);
-	mutex_unlock(&g->clk.clk_mutex);
+	nvgpu_mutex_release(&g->clk.clk_mutex);
 	return 0;
 }
 
@@ -1663,11 +1663,11 @@ static int pll_reg_raw_show(struct seq_file *s, void *data)
 	struct gk20a *g = s->private;
 	u32 reg;
 
-	mutex_lock(&g->clk.clk_mutex);
+	nvgpu_mutex_acquire(&g->clk.clk_mutex);
 	if (!g->clk.clk_hw_on) {
 		seq_printf(s, "%s powered down - no access to registers\n",
 			dev_name(dev_from_gk20a(g)));
-		mutex_unlock(&g->clk.clk_mutex);
+		nvgpu_mutex_release(&g->clk.clk_mutex);
 		return 0;
 	}
 
@@ -1685,7 +1685,7 @@ static int pll_reg_raw_show(struct seq_file *s, void *data)
 	reg = trim_sys_bypassctrl_r();
 	seq_printf(s, "[0x%02x] = 0x%08x\n", reg, gk20a_readl(g, reg));
 
-	mutex_unlock(&g->clk.clk_mutex);
+	nvgpu_mutex_release(&g->clk.clk_mutex);
 	return 0;
 }
 
@@ -1722,13 +1722,13 @@ static ssize_t pll_reg_raw_write(struct file *file,
 	    (reg != trim_sys_bypassctrl_r()))
 		return -EPERM;
 
-	mutex_lock(&g->clk.clk_mutex);
+	nvgpu_mutex_acquire(&g->clk.clk_mutex);
 	if (!g->clk.clk_hw_on) {
-		mutex_unlock(&g->clk.clk_mutex);
+		nvgpu_mutex_release(&g->clk.clk_mutex);
 		return -EBUSY;
 	}
 	gk20a_writel(g, reg, val);
-	mutex_unlock(&g->clk.clk_mutex);
+	nvgpu_mutex_release(&g->clk.clk_mutex);
 	return count;
 }
 
@@ -1755,7 +1755,7 @@ static int monitor_get(void *data, u64 *val)
 	if (err)
 		return err;
 
-	mutex_lock(&g->clk.clk_mutex);
+	nvgpu_mutex_acquire(&g->clk.clk_mutex);
 
 	/* Disable clock slowdown during measurements */
 	clk_slowdown_save = gk20a_readl(g, therm_clk_slowdown_r(0));
@@ -1787,7 +1787,7 @@ static int monitor_get(void *data, u64 *val)
 
 	/* Restore clock slowdown */
 	gk20a_writel(g, therm_clk_slowdown_r(0), clk_slowdown_save);
-	mutex_unlock(&g->clk.clk_mutex);
+	nvgpu_mutex_release(&g->clk.clk_mutex);
 
 	gk20a_idle(g->dev);
 
@@ -1811,14 +1811,14 @@ static int voltage_get(void *data, u64 *val)
 	if (err)
 		return err;
 
-	mutex_lock(&g->clk.clk_mutex);
+	nvgpu_mutex_acquire(&g->clk.clk_mutex);
 
 	det_out = gk20a_readl(g, trim_sys_gpcpll_cfg3_r());
 	det_out = trim_sys_gpcpll_cfg3_dfs_testout_v(det_out);
 	*val = div64_u64((u64)det_out * gpc_pll_params.uvdet_slope +
 			gpc_pll_params.uvdet_offs, 1000ULL);
 
-	mutex_unlock(&g->clk.clk_mutex);
+	nvgpu_mutex_release(&g->clk.clk_mutex);
 
 	gk20a_idle(g->dev);
 	return 0;
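
Every converted path in clk_gm20b.c shares the same idiom: clk_mutex guards both the PLL bookkeeping in clk->gpc_pll and the PLL registers, and the hardware is touched only while clk_hw_on is set. A condensed, hypothetical helper (not present in the driver) that restates the idiom from gm20b_clk_export_set_rate() above:

/* Illustrative only: the locking idiom shared by the converted
 * clk_gm20b.c paths.  The function name is invented; the calls and
 * checks mirror the hunks above. */
static int gm20b_clk_set_rate_locked(struct gk20a *g, unsigned long rate)
{
	struct clk_gk20a *clk = &g->clk;
	int ret;

	nvgpu_mutex_acquire(&clk->clk_mutex);
	ret = set_pll_target(g, rate_gpu_to_gpc2clk(rate),
			clk->gpc_pll.freq);
	if (!ret && clk->gpc_pll.enabled && clk->clk_hw_on)
		ret = set_pll_freq(g, 1);
	nvgpu_mutex_release(&clk->clk_mutex);
	return ret;
}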
diff --git a/drivers/gpu/nvgpu/gm20b/clk_gm20b.h b/drivers/gpu/nvgpu/gm20b/clk_gm20b.h
index 7ea84826..5746165e 100644
--- a/drivers/gpu/nvgpu/gm20b/clk_gm20b.h
+++ b/drivers/gpu/nvgpu/gm20b/clk_gm20b.h
@@ -1,7 +1,7 @@
 /*
  * GM20B Graphics
  *
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -19,7 +19,7 @@
 #ifndef _NVHOST_CLK_GM20B_H_
 #define _NVHOST_CLK_GM20B_H_
 
-#include <linux/mutex.h>
+#include <nvgpu/lock.h>
 
 void gm20b_init_clk_ops(struct gpu_ops *gops);
 
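
The include swap is the only interface-level change in the series: once <nvgpu/lock.h> replaces <linux/mutex.h>, this header no longer names a Linux primitive, which is what allows a non-Linux build of nvgpu to supply its own backing for the same API. Purely to illustrate that point, a hypothetical POSIX backing (assumed, not part of this diff or of nvgpu):

/* Hypothetical userspace/POSIX backing of the nvgpu lock API. */
#include <pthread.h>

struct nvgpu_mutex {
	pthread_mutex_t lock;
};

static inline int nvgpu_mutex_init(struct nvgpu_mutex *mutex)
{
	return pthread_mutex_init(&mutex->lock, NULL);
}

static inline void nvgpu_mutex_acquire(struct nvgpu_mutex *mutex)
{
	pthread_mutex_lock(&mutex->lock);
}

static inline void nvgpu_mutex_release(struct nvgpu_mutex *mutex)
{
	pthread_mutex_unlock(&mutex->lock);
}

Callers such as gm20b_init_clk_setup_sw() compile unchanged against either backing, which is the point of the abstraction.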
diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
index 0375d71f..9cf644fd 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
@@ -1247,7 +1247,7 @@ static int gm20b_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc)
 			GPU_LIT_TPC_IN_GPC_STRIDE);
 	u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc;
 
-	mutex_lock(&g->dbg_sessions_lock);
+	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 
 	sm_id = gr_gpc0_tpc0_sm_cfg_sm_id_v(gk20a_readl(g,
 		gr_gpc0_tpc0_sm_cfg_r() + offset));
@@ -1263,7 +1263,7 @@ static int gm20b_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc)
 	gr->sm_error_states[sm_id].hww_warp_esr_report_mask = gk20a_readl(g,
 		gr_gpc0_tpc0_sm_hww_warp_esr_report_mask_r() + offset);
 
-	mutex_unlock(&g->dbg_sessions_lock);
+	nvgpu_mutex_release(&g->dbg_sessions_lock);
 
 	return 0;
 }
@@ -1280,7 +1280,7 @@ static int gm20b_gr_update_sm_error_state(struct gk20a *g,
 			GPU_LIT_TPC_IN_GPC_STRIDE);
 	int err = 0;
 
-	mutex_lock(&g->dbg_sessions_lock);
+	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 
 	gr->sm_error_states[sm_id].hww_global_esr =
 		sm_error_state->hww_global_esr;
@@ -1336,7 +1336,7 @@ enable_ctxsw:
 	err = gr_gk20a_enable_ctxsw(g);
 
 fail:
-	mutex_unlock(&g->dbg_sessions_lock);
+	nvgpu_mutex_release(&g->dbg_sessions_lock);
 	return err;
 }
 
@@ -1351,7 +1351,7 @@ static int gm20b_gr_clear_sm_error_state(struct gk20a *g,
 			GPU_LIT_TPC_IN_GPC_STRIDE);
 	int err = 0;
 
-	mutex_lock(&g->dbg_sessions_lock);
+	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 
 	memset(&gr->sm_error_states[sm_id], 0, sizeof(*gr->sm_error_states));
 
@@ -1377,7 +1377,7 @@ static int gm20b_gr_clear_sm_error_state(struct gk20a *g,
 	err = gr_gk20a_enable_ctxsw(g);
 
 fail:
-	mutex_unlock(&g->dbg_sessions_lock);
+	nvgpu_mutex_release(&g->dbg_sessions_lock);
 	return err;
 }
 
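
In gr_gm20b.c the dbg_sessions_lock additionally brackets a context-switch stop/start around the SM error-state updates, as the enable_ctxsw/fail labels in the last two hunks show. A condensed sketch of that shared shape; gr_gk20a_disable_ctxsw() does not appear in the hunks and is assumed here as the counterpart of gr_gk20a_enable_ctxsw(), with the register writes elided:

/* Condensed, illustrative shape of gm20b_gr_update_sm_error_state()
 * and gm20b_gr_clear_sm_error_state(); the function name is invented. */
static int gm20b_gr_sm_error_state_op(struct gk20a *g, u32 sm_id)
{
	int err = 0;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	err = gr_gk20a_disable_ctxsw(g);	/* assumed counterpart */
	if (err)
		goto fail;

	/* ... update or clear gr->sm_error_states[sm_id] and, when the
	 * TPC is resident, the gr_gpc0_tpc0_sm_* registers ... */

	err = gr_gk20a_enable_ctxsw(g);
fail:
	nvgpu_mutex_release(&g->dbg_sessions_lock);
	return err;
}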
diff --git a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
index 3324d3df..11258032 100644
--- a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
@@ -120,7 +120,7 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 	if (gr->compbit_store.mem.size == 0)
 		return 0;
 
-	mutex_lock(&g->mm.l2_op_lock);
+	nvgpu_mutex_acquire(&g->mm.l2_op_lock);
 
 	if (op == gk20a_cbc_op_clear) {
 		gk20a_writel(g, ltc_ltcs_ltss_cbc_ctrl2_r(),
@@ -163,7 +163,7 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 	}
 out:
 	trace_gk20a_ltc_cbc_ctrl_done(dev_name(g->dev));
-	mutex_unlock(&g->mm.l2_op_lock);
+	nvgpu_mutex_release(&g->mm.l2_op_lock);
 	return err;
 }
 