path: root/drivers/gpu/nvgpu/vgpu/vgpu.c
author	Terje Bergstrom <tbergstrom@nvidia.com>	2017-04-06 16:10:30 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-04-10 15:24:27 -0400
commit	5405070ecd27ce462babc1dff231fec5cd8bd6b7 (patch)
tree	903461959633aec359b5bb3f4f660c5dcb6bdbcf /drivers/gpu/nvgpu/vgpu/vgpu.c
parent	3a1104c3699b05201abf48ed9283bb8ccbe42732 (diff)
gpu: nvgpu: vgpu: Use new error macros
gk20a_err() and gk20a_warn() require a struct device pointer, which is
not portable across operating systems. The new nvgpu_err() and
nvgpu_warn() macros take struct gk20a pointer. Convert code to use the
more portable macros.

JIRA NVGPU-16

Change-Id: I071e8c50959bfa81730ca964d912bc69f9c7e6ad
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1457355
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
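The patch is purely a call-convention change at each error site. As a rough, self-contained sketch of the difference (the struct layouts and function bodies below are simplified stand-ins, not the real nvgpu code; in the tree nvgpu_err()/nvgpu_warn() are logging macros):

/*
 * Toy illustration only: shows the signature change from a Linux
 * struct device pointer to the OS-agnostic struct gk20a pointer.
 */
#include <stdarg.h>
#include <stdio.h>

struct device { const char *name; };       /* Linux-specific handle */
struct gk20a  { struct device *dev; };     /* driver context available at every call site */

/* Old style: caller must dig out a struct device (g->dev or dev_from_gk20a(g)). */
static void gk20a_err(struct device *dev, const char *fmt, ...)
{
	va_list ap;

	fprintf(stderr, "gk20a %s: ", dev->name);
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
	fputc('\n', stderr);
}

/* New style: caller passes the struct gk20a it already holds. */
static void nvgpu_err(struct gk20a *g, const char *fmt, ...)
{
	va_list ap;

	(void)g;	/* the real macro uses g to pick the log target */
	fprintf(stderr, "nvgpu: ");
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
	fputc('\n', stderr);
}

int main(void)
{
	struct device dev = { .name = "gpu" };
	struct gk20a g = { .dev = &dev };

	gk20a_err(g.dev, "invalid channel event");	/* before this patch */
	nvgpu_err(&g, "invalid channel event");		/* after this patch */
	return 0;
}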
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/vgpu.c')
 drivers/gpu/nvgpu/vgpu/vgpu.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/nvgpu/vgpu/vgpu.c b/drivers/gpu/nvgpu/vgpu/vgpu.c
index b32df08d..4cb7c52e 100644
--- a/drivers/gpu/nvgpu/vgpu/vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/vgpu.c
@@ -106,7 +106,7 @@ static void vgpu_handle_channel_event(struct gk20a *g,
 {
 	if (info->id >= g->fifo.num_channels ||
 			info->event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX) {
-		gk20a_err(g->dev, "invalid channel event");
+		nvgpu_err(g, "invalid channel event");
 		return;
 	}
 
@@ -118,7 +118,7 @@ static void vgpu_handle_channel_event(struct gk20a *g,
 		struct channel_gk20a *ch = &g->fifo.channel[info->id];
 
 		if (!gk20a_channel_get(ch)) {
-			gk20a_err(g->dev, "invalid channel %d for event %d",
+			nvgpu_err(g, "invalid channel %d for event %d",
 				(int)info->id, (int)info->event_id);
 			return;
 		}
@@ -179,7 +179,7 @@ static int vgpu_intr_thread(void *dev_id)
 			vgpu_gr_handle_sm_esr_event(g, &msg->info.sm_esr);
 			break;
 		default:
-			gk20a_err(g->dev, "unknown event %u", msg->event);
+			nvgpu_err(g, "unknown event %u", msg->event);
 			break;
 		}
 
@@ -349,8 +349,7 @@ static int vgpu_read_ptimer(struct gk20a *g, u64 *value)
 	if (!err)
 		*value = p->time;
 	else
-		gk20a_err(dev_from_gk20a(g),
-			"vgpu read ptimer failed, err=%d", err);
+		nvgpu_err(g, "vgpu read ptimer failed, err=%d", err);
 
 	return err;
 }
@@ -393,7 +392,7 @@ static int vgpu_init_hal(struct gk20a *g)
 		err = vgpu_gp10b_init_hal(g);
 		break;
 	default:
-		gk20a_err(g->dev, "no support for %x", ver);
+		nvgpu_err(g, "no support for %x", ver);
 		err = -ENODEV;
 		break;
 	}
@@ -423,25 +422,25 @@ int vgpu_pm_finalize_poweron(struct device *dev)
 
 	err = vgpu_init_mm_support(g);
 	if (err) {
-		gk20a_err(dev, "failed to init gk20a mm");
+		nvgpu_err(g, "failed to init gk20a mm");
 		goto done;
 	}
 
 	err = vgpu_init_fifo_support(g);
 	if (err) {
-		gk20a_err(dev, "failed to init gk20a fifo");
+		nvgpu_err(g, "failed to init gk20a fifo");
 		goto done;
 	}
 
 	err = vgpu_init_gr_support(g);
 	if (err) {
-		gk20a_err(dev, "failed to init gk20a gr");
+		nvgpu_err(g, "failed to init gk20a gr");
 		goto done;
 	}
 
 	err = g->ops.chip_init_gpu_characteristics(g);
 	if (err) {
-		gk20a_err(dev, "failed to init gk20a gpu characteristics");
+		nvgpu_err(g, "failed to init gk20a gpu characteristics");
 		goto done;
 	}
 
@@ -459,6 +458,7 @@ static int vgpu_qos_notify(struct notifier_block *nb,
 	struct gk20a_scale_profile *profile =
 		container_of(nb, struct gk20a_scale_profile,
 				qos_notify_block);
+	struct gk20a *g = get_gk20a(profile->dev);
 	struct tegra_vgpu_cmd_msg msg = {};
 	struct tegra_vgpu_gpu_clk_rate_params *p = &msg.params.gpu_clk_rate;
 	u32 max_freq;
@@ -474,7 +474,7 @@ static int vgpu_qos_notify(struct notifier_block *nb,
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	err = err ? err : msg.ret;
 	if (err)
-		gk20a_err(profile->dev, "%s failed, err=%d", __func__, err);
+		nvgpu_err(g, "%s failed, err=%d", __func__, err);
 
 	return NOTIFY_OK; /* need notify call further */
 }
@@ -536,13 +536,13 @@ static int vgpu_get_constants(struct gk20a *g)
 	err = err ? err : msg.ret;
 
 	if (unlikely(err)) {
-		gk20a_err(g->dev, "%s failed, err=%d", __func__, err);
+		nvgpu_err(g, "%s failed, err=%d", __func__, err);
 		return err;
 	}
 
 	if (unlikely(p->gpc_count > TEGRA_VGPU_MAX_GPC_COUNT ||
 		p->max_tpc_per_gpc_count > TEGRA_VGPU_MAX_TPC_COUNT_PER_GPC)) {
-		gk20a_err(g->dev, "gpc_count %d max_tpc_per_gpc %d overflow",
+		nvgpu_err(g, "gpc_count %d max_tpc_per_gpc %d overflow",
 			(int)p->gpc_count, (int)p->max_tpc_per_gpc_count);
 		return -EINVAL;
 	}