diff options
author | Terje Bergstrom <tbergstrom@nvidia.com> | 2017-04-06 14:01:46 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-04-07 16:48:18 -0400 |
commit | bb72b7e2ed215b26e1d9b94534c24ab4cfa52801 (patch) | |
tree | 2a8e781838616f4640d7ef3e5c04ae7d996e0ecf /drivers/gpu/nvgpu/gm20b/acr_gm20b.c | |
parent | 85f27cec5d010d4ac0f4646f11f145d4b3a122e4 (diff) |
gpu: nvgpu: gm20b: Use new error macros
gk20a_err() and gk20a_warn() require a struct device pointer,
which is not portable across operating systems. The new nvgpu_err()
and nvgpu_warn() macros take struct gk20a pointer. Convert code
to use the more portable macros.
JIRA NVGPU-16
Change-Id: Ic27fb98e03a982e5a1cf672cb4e8f87ecea10a5b
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1457345
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/acr_gm20b.c')
-rw-r--r-- | drivers/gpu/nvgpu/gm20b/acr_gm20b.c | 30 |
1 file changed, 14 insertions, 16 deletions
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c index 181e5301..386b266d 100644 --- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c | |||
@@ -134,7 +134,7 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img) | |||
134 | gm20b_dbg_pmu("requesting PMU ucode in GM20B\n"); | 134 | gm20b_dbg_pmu("requesting PMU ucode in GM20B\n"); |
135 | pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE, 0); | 135 | pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE, 0); |
136 | if (!pmu_fw) { | 136 | if (!pmu_fw) { |
137 | gk20a_err(dev_from_gk20a(g), "failed to load pmu ucode!!"); | 137 | nvgpu_err(g, "failed to load pmu ucode!!"); |
138 | return -ENOENT; | 138 | return -ENOENT; |
139 | } | 139 | } |
140 | g->acr.pmu_fw = pmu_fw; | 140 | g->acr.pmu_fw = pmu_fw; |
@@ -143,13 +143,13 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img) | |||
143 | gm20b_dbg_pmu("requesting PMU ucode desc in GM20B\n"); | 143 | gm20b_dbg_pmu("requesting PMU ucode desc in GM20B\n"); |
144 | pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC, 0); | 144 | pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC, 0); |
145 | if (!pmu_desc) { | 145 | if (!pmu_desc) { |
146 | gk20a_err(dev_from_gk20a(g), "failed to load pmu ucode desc!!"); | 146 | nvgpu_err(g, "failed to load pmu ucode desc!!"); |
147 | err = -ENOENT; | 147 | err = -ENOENT; |
148 | goto release_img_fw; | 148 | goto release_img_fw; |
149 | } | 149 | } |
150 | pmu_sig = nvgpu_request_firmware(g, GM20B_PMU_UCODE_SIG, 0); | 150 | pmu_sig = nvgpu_request_firmware(g, GM20B_PMU_UCODE_SIG, 0); |
151 | if (!pmu_sig) { | 151 | if (!pmu_sig) { |
152 | gk20a_err(dev_from_gk20a(g), "failed to load pmu sig!!"); | 152 | nvgpu_err(g, "failed to load pmu sig!!"); |
153 | err = -ENOENT; | 153 | err = -ENOENT; |
154 | goto release_desc; | 154 | goto release_desc; |
155 | } | 155 | } |
@@ -197,7 +197,7 @@ static int fecs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img) | |||
197 | 197 | ||
198 | fecs_sig = nvgpu_request_firmware(g, GM20B_FECS_UCODE_SIG, 0); | 198 | fecs_sig = nvgpu_request_firmware(g, GM20B_FECS_UCODE_SIG, 0); |
199 | if (!fecs_sig) { | 199 | if (!fecs_sig) { |
200 | gk20a_err(dev_from_gk20a(g), "failed to load fecs sig"); | 200 | nvgpu_err(g, "failed to load fecs sig"); |
201 | return -ENOENT; | 201 | return -ENOENT; |
202 | } | 202 | } |
203 | lsf_desc = nvgpu_kzalloc(g, sizeof(struct lsf_ucode_desc)); | 203 | lsf_desc = nvgpu_kzalloc(g, sizeof(struct lsf_ucode_desc)); |
@@ -267,7 +267,7 @@ static int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img) | |||
267 | 267 | ||
268 | gpccs_sig = nvgpu_request_firmware(g, T18x_GPCCS_UCODE_SIG, 0); | 268 | gpccs_sig = nvgpu_request_firmware(g, T18x_GPCCS_UCODE_SIG, 0); |
269 | if (!gpccs_sig) { | 269 | if (!gpccs_sig) { |
270 | gk20a_err(dev_from_gk20a(g), "failed to load gpccs sig"); | 270 | nvgpu_err(g, "failed to load gpccs sig"); |
271 | return -ENOENT; | 271 | return -ENOENT; |
272 | } | 272 | } |
273 | lsf_desc = nvgpu_kzalloc(g, sizeof(struct lsf_ucode_desc)); | 273 | lsf_desc = nvgpu_kzalloc(g, sizeof(struct lsf_ucode_desc)); |
@@ -412,12 +412,12 @@ int prepare_ucode_blob(struct gk20a *g) | |||
412 | 412 | ||
413 | sgt = nvgpu_kzalloc(g, sizeof(*sgt)); | 413 | sgt = nvgpu_kzalloc(g, sizeof(*sgt)); |
414 | if (!sgt) { | 414 | if (!sgt) { |
415 | gk20a_err(dev_from_gk20a(g), "failed to allocate memory\n"); | 415 | nvgpu_err(g, "failed to allocate memory"); |
416 | return -ENOMEM; | 416 | return -ENOMEM; |
417 | } | 417 | } |
418 | err = sg_alloc_table(sgt, 1, GFP_KERNEL); | 418 | err = sg_alloc_table(sgt, 1, GFP_KERNEL); |
419 | if (err) { | 419 | if (err) { |
420 | gk20a_err(dev_from_gk20a(g), "failed to allocate sg_table\n"); | 420 | nvgpu_err(g, "failed to allocate sg_table"); |
421 | goto free_sgt; | 421 | goto free_sgt; |
422 | } | 422 | } |
423 | page = phys_to_page(wpr_addr); | 423 | page = phys_to_page(wpr_addr); |
@@ -1088,7 +1088,7 @@ static int gm20b_bootstrap_hs_flcn(struct gk20a *g) | |||
1088 | /*First time init case*/ | 1088 | /*First time init case*/ |
1089 | acr_fw = nvgpu_request_firmware(g, GM20B_HSBIN_PMU_UCODE_IMAGE, 0); | 1089 | acr_fw = nvgpu_request_firmware(g, GM20B_HSBIN_PMU_UCODE_IMAGE, 0); |
1090 | if (!acr_fw) { | 1090 | if (!acr_fw) { |
1091 | gk20a_err(dev_from_gk20a(g), "pmu ucode get fail"); | 1091 | nvgpu_err(g, "pmu ucode get fail"); |
1092 | return -ENOENT; | 1092 | return -ENOENT; |
1093 | } | 1093 | } |
1094 | acr->acr_fw = acr_fw; | 1094 | acr->acr_fw = acr_fw; |
@@ -1111,7 +1111,7 @@ static int gm20b_bootstrap_hs_flcn(struct gk20a *g) | |||
1111 | acr->fw_hdr->patch_loc), | 1111 | acr->fw_hdr->patch_loc), |
1112 | (u32 *)(acr_fw->data + | 1112 | (u32 *)(acr_fw->data + |
1113 | acr->fw_hdr->patch_sig)) < 0) { | 1113 | acr->fw_hdr->patch_sig)) < 0) { |
1114 | gk20a_err(dev_from_gk20a(g), "patch signatures fail"); | 1114 | nvgpu_err(g, "patch signatures fail"); |
1115 | err = -1; | 1115 | err = -1; |
1116 | goto err_release_acr_fw; | 1116 | goto err_release_acr_fw; |
1117 | } | 1117 | } |
@@ -1386,7 +1386,6 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt) | |||
1386 | { | 1386 | { |
1387 | struct mm_gk20a *mm = &g->mm; | 1387 | struct mm_gk20a *mm = &g->mm; |
1388 | struct vm_gk20a *vm = &mm->pmu.vm; | 1388 | struct vm_gk20a *vm = &mm->pmu.vm; |
1389 | struct device *d = dev_from_gk20a(g); | ||
1390 | int err = 0; | 1389 | int err = 0; |
1391 | u32 bl_sz; | 1390 | u32 bl_sz; |
1392 | struct acr_desc *acr = &g->acr; | 1391 | struct acr_desc *acr = &g->acr; |
@@ -1399,7 +1398,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt) | |||
1399 | hsbl_fw = nvgpu_request_firmware(g, | 1398 | hsbl_fw = nvgpu_request_firmware(g, |
1400 | GM20B_HSBIN_PMU_BL_UCODE_IMAGE, 0); | 1399 | GM20B_HSBIN_PMU_BL_UCODE_IMAGE, 0); |
1401 | if (!hsbl_fw) { | 1400 | if (!hsbl_fw) { |
1402 | gk20a_err(dev_from_gk20a(g), "pmu ucode load fail"); | 1401 | nvgpu_err(g, "pmu ucode load fail"); |
1403 | return -ENOENT; | 1402 | return -ENOENT; |
1404 | } | 1403 | } |
1405 | acr->hsbl_fw = hsbl_fw; | 1404 | acr->hsbl_fw = hsbl_fw; |
@@ -1420,7 +1419,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt) | |||
1420 | err = nvgpu_dma_alloc_flags_sys(g, | 1419 | err = nvgpu_dma_alloc_flags_sys(g, |
1421 | NVGPU_DMA_READ_ONLY, bl_sz, &acr->hsbl_ucode); | 1420 | NVGPU_DMA_READ_ONLY, bl_sz, &acr->hsbl_ucode); |
1422 | if (err) { | 1421 | if (err) { |
1423 | gk20a_err(d, "failed to allocate memory\n"); | 1422 | nvgpu_err(g, "failed to allocate memory\n"); |
1424 | goto err_done; | 1423 | goto err_done; |
1425 | } | 1424 | } |
1426 | 1425 | ||
@@ -1430,7 +1429,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt) | |||
1430 | gk20a_mem_flag_read_only, false, | 1429 | gk20a_mem_flag_read_only, false, |
1431 | acr->hsbl_ucode.aperture); | 1430 | acr->hsbl_ucode.aperture); |
1432 | if (!acr->hsbl_ucode.gpu_va) { | 1431 | if (!acr->hsbl_ucode.gpu_va) { |
1433 | gk20a_err(d, "failed to map pmu ucode memory!!"); | 1432 | nvgpu_err(g, "failed to map pmu ucode memory!!"); |
1434 | goto err_free_ucode; | 1433 | goto err_free_ucode; |
1435 | } | 1434 | } |
1436 | 1435 | ||
@@ -1506,7 +1505,7 @@ static int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms) | |||
1506 | } while (!nvgpu_timeout_expired(&timeout)); | 1505 | } while (!nvgpu_timeout_expired(&timeout)); |
1507 | 1506 | ||
1508 | if (ret) { | 1507 | if (ret) { |
1509 | gk20a_err(dev_from_gk20a(g), "ACR boot timed out"); | 1508 | nvgpu_err(g, "ACR boot timed out"); |
1510 | return ret; | 1509 | return ret; |
1511 | } | 1510 | } |
1512 | 1511 | ||
@@ -1514,8 +1513,7 @@ static int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms) | |||
1514 | gm20b_dbg_pmu("ACR capabilities %x\n", g->acr.capabilities); | 1513 | gm20b_dbg_pmu("ACR capabilities %x\n", g->acr.capabilities); |
1515 | data = gk20a_readl(g, pwr_falcon_mailbox0_r()); | 1514 | data = gk20a_readl(g, pwr_falcon_mailbox0_r()); |
1516 | if (data) { | 1515 | if (data) { |
1517 | gk20a_err(dev_from_gk20a(g), | 1516 | nvgpu_err(g, "ACR boot failed, err %x", data); |
1518 | "ACR boot failed, err %x", data); | ||
1519 | ret = -EAGAIN; | 1517 | ret = -EAGAIN; |
1520 | } | 1518 | } |
1521 | 1519 | ||