path: root/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
author	Mahantesh Kumbar <mkumbar@nvidia.com>	2017-06-21 13:58:55 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-06-27 06:58:21 -0400
commit	3a2eb257eefbd6c2c5943f4aaa10f3cee7adfad1 (patch)
tree	f217bcabe6a94840e7bb5f5ce8df28746f0b8bcb	/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
parent	fe3fc43401662e0835228ce47d11014318d06b65 (diff)
gpu: nvgpu: use nvgpu_flcn_copy_to_dmem()
- replace usage of pmu_copy_to_dmem() with nvgpu_flcn_copy_to_dmem()
- delete pmu_copy_to_dmem()

JIRA NVGPU-99

Change-Id: I9bb5837556e144521b181f9e15731beee08b435a
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master/r/1506577
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
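For context, a minimal before/after sketch of the call-site change, taken from the pmu_bootstrap() hunk in the diff below (surrounding variables such as g, pmu and addr_args are as in that function):

	/* old: PMU-local helper, takes the nvgpu_pmu object */
	pmu_copy_to_dmem(pmu, addr_args,
		(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
		g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);

	/* new: common falcon helper, takes the falcon instance instead */
	nvgpu_flcn_copy_to_dmem(pmu->flcn, addr_args,
		(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
		g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);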
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/pmu_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/pmu_gk20a.c	53
1 file changed, 1 insertion(+), 52 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index ce965992..4a676b82 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -104,57 +104,6 @@ static void printtrace(struct nvgpu_pmu *pmu)
 	nvgpu_kfree(g, tracebuffer);
 }
 
-void pmu_copy_to_dmem(struct nvgpu_pmu *pmu,
-		u32 dst, u8 *src, u32 size, u8 port)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-	u32 i, words, bytes;
-	u32 data, addr_mask;
-	u32 *src_u32 = (u32*)src;
-
-	if (size == 0) {
-		nvgpu_err(g, "size is zero");
-		return;
-	}
-
-	if (dst & 0x3) {
-		nvgpu_err(g, "dst (0x%08x) not 4-byte aligned", dst);
-		return;
-	}
-
-	nvgpu_mutex_acquire(&pmu->pmu_copy_lock);
-
-	words = size >> 2;
-	bytes = size & 0x3;
-
-	addr_mask = pwr_falcon_dmemc_offs_m() |
-		pwr_falcon_dmemc_blk_m();
-
-	dst &= addr_mask;
-
-	gk20a_writel(g, pwr_falcon_dmemc_r(port),
-		dst | pwr_falcon_dmemc_aincw_f(1));
-
-	for (i = 0; i < words; i++)
-		gk20a_writel(g, pwr_falcon_dmemd_r(port), src_u32[i]);
-
-	if (bytes > 0) {
-		data = 0;
-		for (i = 0; i < bytes; i++)
-			((u8 *)&data)[i] = src[(words << 2) + i];
-		gk20a_writel(g, pwr_falcon_dmemd_r(port), data);
-	}
-
-	data = gk20a_readl(g, pwr_falcon_dmemc_r(port)) & addr_mask;
-	size = ALIGN(size, 4);
-	if (data != ((dst + size) & addr_mask)) {
-		nvgpu_err(g, "copy failed. bytes written %d, expected %d",
-			data - dst, size);
-	}
-	nvgpu_mutex_release(&pmu->pmu_copy_lock);
-	return;
-}
-
 void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
@@ -319,7 +268,7 @@ int pmu_bootstrap(struct nvgpu_pmu *pmu)
 			<< GK20A_PMU_DMEM_BLKSIZE2) -
 		g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu);
 
-	pmu_copy_to_dmem(pmu, addr_args,
+	nvgpu_flcn_copy_to_dmem(pmu->flcn, addr_args,
 		(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
 		g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);
 
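For reference, the core of the DMEM write sequence that the deleted helper implemented, and that the common nvgpu_flcn_copy_to_dmem() is now expected to provide, is roughly the following (a sketch distilled from the removed pmu_gk20a.c code above; the actual flcn-layer implementation is not part of this diff):

	/* program DMEMC with the destination offset/block and auto-increment
	 * on write, then stream 32-bit words through DMEMD */
	gk20a_writel(g, pwr_falcon_dmemc_r(port),
		(dst & addr_mask) | pwr_falcon_dmemc_aincw_f(1));
	for (i = 0; i < (size >> 2); i++)
		gk20a_writel(g, pwr_falcon_dmemd_r(port), src_u32[i]);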