diff options
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/clk_gk20a.c')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/clk_gk20a.c | 38 |
1 file changed, 19 insertions, 19 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/clk_gk20a.c b/drivers/gpu/nvgpu/gk20a/clk_gk20a.c index 32690c90..38d13b4b 100644 --- a/drivers/gpu/nvgpu/gk20a/clk_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/clk_gk20a.c
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * GK20A Clocks | 2 | * GK20A Clocks |
3 | * | 3 | * |
4 | * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. | 4 | * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms and conditions of the GNU General Public License, | 7 | * under the terms and conditions of the GNU General Public License, |
@@ -457,7 +457,7 @@ static int gk20a_init_clk_setup_sw(struct gk20a *g) | |||
457 | clk->gpc_pll.freq /= pl_to_div[clk->gpc_pll.PL]; | 457 | clk->gpc_pll.freq /= pl_to_div[clk->gpc_pll.PL]; |
458 | } | 458 | } |
459 | 459 | ||
460 | mutex_init(&clk->clk_mutex); | 460 | nvgpu_mutex_init(&clk->clk_mutex); |
461 | 461 | ||
462 | clk->sw_ready = true; | 462 | clk->sw_ready = true; |
463 | 463 | ||
@@ -538,14 +538,14 @@ static int gk20a_clk_export_set_rate(void *data, unsigned long *rate) | |||
538 | struct clk_gk20a *clk = &g->clk; | 538 | struct clk_gk20a *clk = &g->clk; |
539 | 539 | ||
540 | if (rate) { | 540 | if (rate) { |
541 | mutex_lock(&clk->clk_mutex); | 541 | nvgpu_mutex_acquire(&clk->clk_mutex); |
542 | old_freq = clk->gpc_pll.freq; | 542 | old_freq = clk->gpc_pll.freq; |
543 | ret = set_pll_target(g, rate_gpu_to_gpc2clk(*rate), old_freq); | 543 | ret = set_pll_target(g, rate_gpu_to_gpc2clk(*rate), old_freq); |
544 | if (!ret && clk->gpc_pll.enabled) | 544 | if (!ret && clk->gpc_pll.enabled) |
545 | ret = set_pll_freq(g, clk->gpc_pll.freq, old_freq); | 545 | ret = set_pll_freq(g, clk->gpc_pll.freq, old_freq); |
546 | if (!ret) | 546 | if (!ret) |
547 | *rate = rate_gpc2clk_to_gpu(clk->gpc_pll.freq); | 547 | *rate = rate_gpc2clk_to_gpu(clk->gpc_pll.freq); |
548 | mutex_unlock(&clk->clk_mutex); | 548 | nvgpu_mutex_release(&clk->clk_mutex); |
549 | } | 549 | } |
550 | return ret; | 550 | return ret; |
551 | } | 551 | } |
@@ -556,9 +556,9 @@ static int gk20a_clk_export_enable(void *data) | |||
556 | struct gk20a *g = data; | 556 | struct gk20a *g = data; |
557 | struct clk_gk20a *clk = &g->clk; | 557 | struct clk_gk20a *clk = &g->clk; |
558 | 558 | ||
559 | mutex_lock(&clk->clk_mutex); | 559 | nvgpu_mutex_acquire(&clk->clk_mutex); |
560 | ret = set_pll_freq(g, clk->gpc_pll.freq, clk->gpc_pll.freq); | 560 | ret = set_pll_freq(g, clk->gpc_pll.freq, clk->gpc_pll.freq); |
561 | mutex_unlock(&clk->clk_mutex); | 561 | nvgpu_mutex_release(&clk->clk_mutex); |
562 | return ret; | 562 | return ret; |
563 | } | 563 | } |
564 | 564 | ||
@@ -567,10 +567,10 @@ static void gk20a_clk_export_disable(void *data) | |||
567 | struct gk20a *g = data; | 567 | struct gk20a *g = data; |
568 | struct clk_gk20a *clk = &g->clk; | 568 | struct clk_gk20a *clk = &g->clk; |
569 | 569 | ||
570 | mutex_lock(&clk->clk_mutex); | 570 | nvgpu_mutex_acquire(&clk->clk_mutex); |
571 | if (g->clk.clk_hw_on) | 571 | if (g->clk.clk_hw_on) |
572 | clk_disable_gpcpll(g, 1); | 572 | clk_disable_gpcpll(g, 1); |
573 | mutex_unlock(&clk->clk_mutex); | 573 | nvgpu_mutex_release(&clk->clk_mutex); |
574 | } | 574 | } |
575 | 575 | ||
576 | static void gk20a_clk_export_init(void *data, unsigned long *rate, bool *state) | 576 | static void gk20a_clk_export_init(void *data, unsigned long *rate, bool *state) |
@@ -578,12 +578,12 @@ static void gk20a_clk_export_init(void *data, unsigned long *rate, bool *state) | |||
578 | struct gk20a *g = data; | 578 | struct gk20a *g = data; |
579 | struct clk_gk20a *clk = &g->clk; | 579 | struct clk_gk20a *clk = &g->clk; |
580 | 580 | ||
581 | mutex_lock(&clk->clk_mutex); | 581 | nvgpu_mutex_acquire(&clk->clk_mutex); |
582 | if (state) | 582 | if (state) |
583 | *state = clk->gpc_pll.enabled; | 583 | *state = clk->gpc_pll.enabled; |
584 | if (rate) | 584 | if (rate) |
585 | *rate = rate_gpc2clk_to_gpu(clk->gpc_pll.freq); | 585 | *rate = rate_gpc2clk_to_gpu(clk->gpc_pll.freq); |
586 | mutex_unlock(&clk->clk_mutex); | 586 | nvgpu_mutex_release(&clk->clk_mutex); |
587 | } | 587 | } |
588 | 588 | ||
589 | static struct tegra_clk_export_ops gk20a_clk_export_ops = { | 589 | static struct tegra_clk_export_ops gk20a_clk_export_ops = { |
@@ -640,11 +640,11 @@ static int gk20a_init_clk_support(struct gk20a *g) | |||
640 | if (err) | 640 | if (err) |
641 | return err; | 641 | return err; |
642 | 642 | ||
643 | mutex_lock(&clk->clk_mutex); | 643 | nvgpu_mutex_acquire(&clk->clk_mutex); |
644 | clk->clk_hw_on = true; | 644 | clk->clk_hw_on = true; |
645 | 645 | ||
646 | err = gk20a_init_clk_setup_hw(g); | 646 | err = gk20a_init_clk_setup_hw(g); |
647 | mutex_unlock(&clk->clk_mutex); | 647 | nvgpu_mutex_release(&clk->clk_mutex); |
648 | if (err) | 648 | if (err) |
649 | return err; | 649 | return err; |
650 | 650 | ||
@@ -658,9 +658,9 @@ static int gk20a_init_clk_support(struct gk20a *g) | |||
658 | return err; | 658 | return err; |
659 | 659 | ||
660 | /* The prev call may not enable PLL if gbus is unbalanced - force it */ | 660 | /* The prev call may not enable PLL if gbus is unbalanced - force it */ |
661 | mutex_lock(&clk->clk_mutex); | 661 | nvgpu_mutex_acquire(&clk->clk_mutex); |
662 | err = set_pll_freq(g, clk->gpc_pll.freq, clk->gpc_pll.freq); | 662 | err = set_pll_freq(g, clk->gpc_pll.freq, clk->gpc_pll.freq); |
663 | mutex_unlock(&clk->clk_mutex); | 663 | nvgpu_mutex_release(&clk->clk_mutex); |
664 | if (err) | 664 | if (err) |
665 | return err; | 665 | return err; |
666 | 666 | ||
@@ -680,10 +680,10 @@ static int gk20a_suspend_clk_support(struct gk20a *g) | |||
680 | clk_disable(g->clk.tegra_clk); | 680 | clk_disable(g->clk.tegra_clk); |
681 | 681 | ||
682 | /* The prev call may not disable PLL if gbus is unbalanced - force it */ | 682 | /* The prev call may not disable PLL if gbus is unbalanced - force it */ |
683 | mutex_lock(&g->clk.clk_mutex); | 683 | nvgpu_mutex_acquire(&g->clk.clk_mutex); |
684 | ret = clk_disable_gpcpll(g, 1); | 684 | ret = clk_disable_gpcpll(g, 1); |
685 | g->clk.clk_hw_on = false; | 685 | g->clk.clk_hw_on = false; |
686 | mutex_unlock(&g->clk.clk_mutex); | 686 | nvgpu_mutex_release(&g->clk.clk_mutex); |
687 | return ret; | 687 | return ret; |
688 | } | 688 | } |
689 | 689 | ||
@@ -714,10 +714,10 @@ static int pll_reg_show(struct seq_file *s, void *data) | |||
714 | struct gk20a *g = s->private; | 714 | struct gk20a *g = s->private; |
715 | u32 reg, m, n, pl, f; | 715 | u32 reg, m, n, pl, f; |
716 | 716 | ||
717 | mutex_lock(&g->clk.clk_mutex); | 717 | nvgpu_mutex_acquire(&g->clk.clk_mutex); |
718 | if (!g->clk.clk_hw_on) { | 718 | if (!g->clk.clk_hw_on) { |
719 | seq_printf(s, "gk20a powered down - no access to registers\n"); | 719 | seq_printf(s, "gk20a powered down - no access to registers\n"); |
720 | mutex_unlock(&g->clk.clk_mutex); | 720 | nvgpu_mutex_release(&g->clk.clk_mutex); |
721 | return 0; | 721 | return 0; |
722 | } | 722 | } |
723 | 723 | ||
@@ -733,7 +733,7 @@ static int pll_reg_show(struct seq_file *s, void *data) | |||
733 | f = g->clk.gpc_pll.clk_in * n / (m * pl_to_div[pl]); | 733 | f = g->clk.gpc_pll.clk_in * n / (m * pl_to_div[pl]); |
734 | seq_printf(s, "coef = 0x%x : m = %u : n = %u : pl = %u", reg, m, n, pl); | 734 | seq_printf(s, "coef = 0x%x : m = %u : n = %u : pl = %u", reg, m, n, pl); |
735 | seq_printf(s, " : pll_f(gpu_f) = %u(%u) kHz\n", f, f/2); | 735 | seq_printf(s, " : pll_f(gpu_f) = %u(%u) kHz\n", f, f/2); |
736 | mutex_unlock(&g->clk.clk_mutex); | 736 | nvgpu_mutex_release(&g->clk.clk_mutex); |
737 | return 0; | 737 | return 0; |
738 | } | 738 | } |
739 | 739 | ||