diff options
author | Terje Bergstrom <tbergstrom@nvidia.com> | 2017-03-30 10:44:03 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-04-10 22:04:19 -0400 |
commit | 3ba374a5d94f8c2067731155afaf79f03e6c390c (patch) | |
tree | d8a2bd0d52b1e8862510aedeb7529944c0b7e28e /drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c | |
parent | 2be51206af88aba6662cdd9de5bd6c18989bbcbd (diff) |
gpu: nvgpu: gk20a: Use new error macro
gk20a_err() and gk20a_warn() require a struct device pointer,
which is not portable across operating systems. The new nvgpu_err()
and nvgpu_warn() macros take struct gk20a pointer. Convert code
to use the more portable macros.
JIRA NVGPU-16
Change-Id: Ia51f36d94c5ce57a5a0ab83b3c83a6bce09e2d5c
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1331694
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
GVS: Gerrit_Virtual_Submit
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c | 84 |
1 file changed, 42 insertions, 42 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c index d7f8ceba..bc3f67c4 100644 --- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <uapi/linux/nvgpu.h> | 25 | #include <uapi/linux/nvgpu.h> |
26 | 26 | ||
27 | #include <nvgpu/kmem.h> | 27 | #include <nvgpu/kmem.h> |
28 | #include <nvgpu/log.h> | ||
28 | 29 | ||
29 | #include "gk20a.h" | 30 | #include "gk20a.h" |
30 | #include "gr_gk20a.h" | 31 | #include "gr_gk20a.h" |
@@ -229,7 +230,7 @@ static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s, | |||
229 | 230 | ||
230 | ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); | 231 | ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); |
231 | if (!ch) { | 232 | if (!ch) { |
232 | gk20a_err(dev_from_gk20a(dbg_s->g), | 233 | nvgpu_err(dbg_s->g, |
233 | "no channel bound to dbg session\n"); | 234 | "no channel bound to dbg session\n"); |
234 | return -EINVAL; | 235 | return -EINVAL; |
235 | } | 236 | } |
@@ -248,7 +249,7 @@ static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s, | |||
248 | break; | 249 | break; |
249 | 250 | ||
250 | default: | 251 | default: |
251 | gk20a_err(dev_from_gk20a(dbg_s->g), | 252 | nvgpu_err(dbg_s->g, |
252 | "unrecognized dbg gpu events ctrl cmd: 0x%x", | 253 | "unrecognized dbg gpu events ctrl cmd: 0x%x", |
253 | args->cmd); | 254 | args->cmd); |
254 | ret = -EINVAL; | 255 | ret = -EINVAL; |
@@ -402,7 +403,7 @@ static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s, | |||
402 | break; | 403 | break; |
403 | 404 | ||
404 | default: | 405 | default: |
405 | gk20a_err(dev_from_gk20a(g), | 406 | nvgpu_err(g, |
406 | "unrecognized dbg gpu timeout mode : 0x%x", | 407 | "unrecognized dbg gpu timeout mode : 0x%x", |
407 | timeout_mode); | 408 | timeout_mode); |
408 | err = -EINVAL; | 409 | err = -EINVAL; |
@@ -742,7 +743,7 @@ static int nvgpu_dbg_gpu_ioctl_read_single_sm_error_state( | |||
742 | write_size); | 743 | write_size); |
743 | nvgpu_mutex_release(&g->dbg_sessions_lock); | 744 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
744 | if (err) { | 745 | if (err) { |
745 | gk20a_err(dev_from_gk20a(g), "copy_to_user failed!\n"); | 746 | nvgpu_err(g, "copy_to_user failed!\n"); |
746 | return err; | 747 | return err; |
747 | } | 748 | } |
748 | 749 | ||
@@ -1099,7 +1100,7 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd, | |||
1099 | break; | 1100 | break; |
1100 | 1101 | ||
1101 | default: | 1102 | default: |
1102 | gk20a_err(dev_from_gk20a(g), | 1103 | nvgpu_err(g, |
1103 | "unrecognized dbg gpu ioctl cmd: 0x%x", | 1104 | "unrecognized dbg gpu ioctl cmd: 0x%x", |
1104 | cmd); | 1105 | cmd); |
1105 | err = -ENOTTY; | 1106 | err = -ENOTTY; |
@@ -1146,14 +1147,13 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s, | |||
1146 | int err = 0, powergate_err = 0; | 1147 | int err = 0, powergate_err = 0; |
1147 | bool is_pg_disabled = false; | 1148 | bool is_pg_disabled = false; |
1148 | 1149 | ||
1149 | struct device *dev = dbg_s->dev; | ||
1150 | struct gk20a *g = dbg_s->g; | 1150 | struct gk20a *g = dbg_s->g; |
1151 | struct channel_gk20a *ch; | 1151 | struct channel_gk20a *ch; |
1152 | 1152 | ||
1153 | gk20a_dbg_fn("%d ops, max fragment %d", args->num_ops, g->dbg_regops_tmp_buf_ops); | 1153 | gk20a_dbg_fn("%d ops, max fragment %d", args->num_ops, g->dbg_regops_tmp_buf_ops); |
1154 | 1154 | ||
1155 | if (args->num_ops > g->gpu_characteristics.reg_ops_limit) { | 1155 | if (args->num_ops > g->gpu_characteristics.reg_ops_limit) { |
1156 | gk20a_err(dev, "regops limit exceeded"); | 1156 | nvgpu_err(g, "regops limit exceeded"); |
1157 | return -EINVAL; | 1157 | return -EINVAL; |
1158 | } | 1158 | } |
1159 | 1159 | ||
@@ -1163,25 +1163,25 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s, | |||
1163 | } | 1163 | } |
1164 | 1164 | ||
1165 | if (g->dbg_regops_tmp_buf_ops == 0 || !g->dbg_regops_tmp_buf) { | 1165 | if (g->dbg_regops_tmp_buf_ops == 0 || !g->dbg_regops_tmp_buf) { |
1166 | gk20a_err(dev, "reg ops work buffer not allocated"); | 1166 | nvgpu_err(g, "reg ops work buffer not allocated"); |
1167 | return -ENODEV; | 1167 | return -ENODEV; |
1168 | } | 1168 | } |
1169 | 1169 | ||
1170 | if (!dbg_s->id) { | 1170 | if (!dbg_s->id) { |
1171 | gk20a_err(dev, "can't call reg_ops on an unbound debugger session"); | 1171 | nvgpu_err(g, "can't call reg_ops on an unbound debugger session"); |
1172 | return -EINVAL; | 1172 | return -EINVAL; |
1173 | } | 1173 | } |
1174 | 1174 | ||
1175 | ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); | 1175 | ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); |
1176 | if (!dbg_s->is_profiler && !ch) { | 1176 | if (!dbg_s->is_profiler && !ch) { |
1177 | gk20a_err(dev, "bind a channel before regops for a debugging session"); | 1177 | nvgpu_err(g, "bind a channel before regops for a debugging session"); |
1178 | return -EINVAL; | 1178 | return -EINVAL; |
1179 | } | 1179 | } |
1180 | 1180 | ||
1181 | /* be sure that ctx info is in place */ | 1181 | /* be sure that ctx info is in place */ |
1182 | if (!gk20a_gpu_is_virtual(dbg_s->dev) && | 1182 | if (!gk20a_gpu_is_virtual(dbg_s->dev) && |
1183 | !gr_context_info_available(dbg_s, &g->gr)) { | 1183 | !gr_context_info_available(dbg_s, &g->gr)) { |
1184 | gk20a_err(dev, "gr context data not available\n"); | 1184 | nvgpu_err(g, "gr context data not available\n"); |
1185 | return -ENODEV; | 1185 | return -ENODEV; |
1186 | } | 1186 | } |
1187 | 1187 | ||
@@ -1221,7 +1221,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s, | |||
1221 | 1221 | ||
1222 | if (copy_from_user(g->dbg_regops_tmp_buf, | 1222 | if (copy_from_user(g->dbg_regops_tmp_buf, |
1223 | fragment, fragment_size)) { | 1223 | fragment, fragment_size)) { |
1224 | dev_err(dev, "copy_from_user failed!"); | 1224 | nvgpu_err(g, "copy_from_user failed!"); |
1225 | err = -EFAULT; | 1225 | err = -EFAULT; |
1226 | break; | 1226 | break; |
1227 | } | 1227 | } |
@@ -1233,7 +1233,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s, | |||
1233 | 1233 | ||
1234 | if (copy_to_user(fragment, g->dbg_regops_tmp_buf, | 1234 | if (copy_to_user(fragment, g->dbg_regops_tmp_buf, |
1235 | fragment_size)) { | 1235 | fragment_size)) { |
1236 | dev_err(dev, "copy_to_user failed!"); | 1236 | nvgpu_err(g, "copy_to_user failed!"); |
1237 | err = -EFAULT; | 1237 | err = -EFAULT; |
1238 | break; | 1238 | break; |
1239 | } | 1239 | } |
@@ -1255,7 +1255,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s, | |||
1255 | err = powergate_err; | 1255 | err = powergate_err; |
1256 | 1256 | ||
1257 | if (err) | 1257 | if (err) |
1258 | gk20a_err(dev, "dbg regops failed"); | 1258 | nvgpu_err(g, "dbg regops failed"); |
1259 | 1259 | ||
1260 | return err; | 1260 | return err; |
1261 | } | 1261 | } |
@@ -1350,7 +1350,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s, u32 powermode) | |||
1350 | break; | 1350 | break; |
1351 | 1351 | ||
1352 | default: | 1352 | default: |
1353 | gk20a_err(dev_from_gk20a(g), | 1353 | nvgpu_err(g, |
1354 | "unrecognized dbg gpu powergate mode: 0x%x", | 1354 | "unrecognized dbg gpu powergate mode: 0x%x", |
1355 | powermode); | 1355 | powermode); |
1356 | err = -ENOTTY; | 1356 | err = -ENOTTY; |
@@ -1388,7 +1388,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s, | |||
1388 | 1388 | ||
1389 | err = gk20a_busy(g); | 1389 | err = gk20a_busy(g); |
1390 | if (err) { | 1390 | if (err) { |
1391 | gk20a_err(dev_from_gk20a(g), "failed to poweron"); | 1391 | nvgpu_err(g, "failed to poweron"); |
1392 | return err; | 1392 | return err; |
1393 | } | 1393 | } |
1394 | 1394 | ||
@@ -1397,7 +1397,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s, | |||
1397 | 1397 | ||
1398 | ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s); | 1398 | ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s); |
1399 | if (!ch_gk20a) { | 1399 | if (!ch_gk20a) { |
1400 | gk20a_err(dev_from_gk20a(g), | 1400 | nvgpu_err(g, |
1401 | "no bound channel for smpc ctxsw mode update\n"); | 1401 | "no bound channel for smpc ctxsw mode update\n"); |
1402 | err = -EINVAL; | 1402 | err = -EINVAL; |
1403 | goto clean_up; | 1403 | goto clean_up; |
@@ -1406,7 +1406,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s, | |||
1406 | err = g->ops.gr.update_smpc_ctxsw_mode(g, ch_gk20a, | 1406 | err = g->ops.gr.update_smpc_ctxsw_mode(g, ch_gk20a, |
1407 | args->mode == NVGPU_DBG_GPU_SMPC_CTXSW_MODE_CTXSW); | 1407 | args->mode == NVGPU_DBG_GPU_SMPC_CTXSW_MODE_CTXSW); |
1408 | if (err) { | 1408 | if (err) { |
1409 | gk20a_err(dev_from_gk20a(g), | 1409 | nvgpu_err(g, |
1410 | "error (%d) during smpc ctxsw mode update\n", err); | 1410 | "error (%d) during smpc ctxsw mode update\n", err); |
1411 | goto clean_up; | 1411 | goto clean_up; |
1412 | } | 1412 | } |
@@ -1434,13 +1434,13 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s, | |||
1434 | * cleaned up. | 1434 | * cleaned up. |
1435 | */ | 1435 | */ |
1436 | if (!dbg_s->has_profiler_reservation) { | 1436 | if (!dbg_s->has_profiler_reservation) { |
1437 | gk20a_err(dev_from_gk20a(g), | 1437 | nvgpu_err(g, |
1438 | "session doesn't have a valid reservation"); | 1438 | "session doesn't have a valid reservation"); |
1439 | } | 1439 | } |
1440 | 1440 | ||
1441 | err = gk20a_busy(g); | 1441 | err = gk20a_busy(g); |
1442 | if (err) { | 1442 | if (err) { |
1443 | gk20a_err(dev_from_gk20a(g), "failed to poweron"); | 1443 | nvgpu_err(g, "failed to poweron"); |
1444 | return err; | 1444 | return err; |
1445 | } | 1445 | } |
1446 | 1446 | ||
@@ -1449,7 +1449,7 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s, | |||
1449 | 1449 | ||
1450 | ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s); | 1450 | ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s); |
1451 | if (!ch_gk20a) { | 1451 | if (!ch_gk20a) { |
1452 | gk20a_err(dev_from_gk20a(g), | 1452 | nvgpu_err(g, |
1453 | "no bound channel for pm ctxsw mode update\n"); | 1453 | "no bound channel for pm ctxsw mode update\n"); |
1454 | err = -EINVAL; | 1454 | err = -EINVAL; |
1455 | goto clean_up; | 1455 | goto clean_up; |
@@ -1458,7 +1458,7 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s, | |||
1458 | err = g->ops.gr.update_hwpm_ctxsw_mode(g, ch_gk20a, | 1458 | err = g->ops.gr.update_hwpm_ctxsw_mode(g, ch_gk20a, |
1459 | args->mode == NVGPU_DBG_GPU_HWPM_CTXSW_MODE_CTXSW); | 1459 | args->mode == NVGPU_DBG_GPU_HWPM_CTXSW_MODE_CTXSW); |
1460 | if (err) | 1460 | if (err) |
1461 | gk20a_err(dev_from_gk20a(g), | 1461 | nvgpu_err(g, |
1462 | "error (%d) during pm ctxsw mode update\n", err); | 1462 | "error (%d) during pm ctxsw mode update\n", err); |
1463 | 1463 | ||
1464 | /* gk20a would require a WAR to set the core PM_ENABLE bit, not | 1464 | /* gk20a would require a WAR to set the core PM_ENABLE bit, not |
@@ -1486,7 +1486,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm( | |||
1486 | 1486 | ||
1487 | err = gk20a_busy(g); | 1487 | err = gk20a_busy(g); |
1488 | if (err) { | 1488 | if (err) { |
1489 | gk20a_err(dev_from_gk20a(g), "failed to poweron"); | 1489 | nvgpu_err(g, "failed to poweron"); |
1490 | return err; | 1490 | return err; |
1491 | } | 1491 | } |
1492 | 1492 | ||
@@ -1495,7 +1495,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm( | |||
1495 | /* Suspend GPU context switching */ | 1495 | /* Suspend GPU context switching */ |
1496 | err = gr_gk20a_disable_ctxsw(g); | 1496 | err = gr_gk20a_disable_ctxsw(g); |
1497 | if (err) { | 1497 | if (err) { |
1498 | gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw"); | 1498 | nvgpu_err(g, "unable to stop gr ctxsw"); |
1499 | /* this should probably be ctx-fatal... */ | 1499 | /* this should probably be ctx-fatal... */ |
1500 | goto clean_up; | 1500 | goto clean_up; |
1501 | } | 1501 | } |
@@ -1512,7 +1512,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm( | |||
1512 | 1512 | ||
1513 | err = gr_gk20a_enable_ctxsw(g); | 1513 | err = gr_gk20a_enable_ctxsw(g); |
1514 | if (err) | 1514 | if (err) |
1515 | gk20a_err(dev_from_gk20a(g), "unable to restart ctxsw!\n"); | 1515 | nvgpu_err(g, "unable to restart ctxsw!\n"); |
1516 | 1516 | ||
1517 | clean_up: | 1517 | clean_up: |
1518 | nvgpu_mutex_release(&g->dbg_sessions_lock); | 1518 | nvgpu_mutex_release(&g->dbg_sessions_lock); |
@@ -1544,7 +1544,7 @@ static int nvgpu_ioctl_allocate_profiler_object( | |||
1544 | else { | 1544 | else { |
1545 | prof_obj->ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); | 1545 | prof_obj->ch = nvgpu_dbg_gpu_get_session_channel(dbg_s); |
1546 | if (prof_obj->ch == NULL) { | 1546 | if (prof_obj->ch == NULL) { |
1547 | gk20a_err(dev_from_gk20a(g), | 1547 | nvgpu_err(g, |
1548 | "bind a channel for dbg session"); | 1548 | "bind a channel for dbg session"); |
1549 | nvgpu_kfree(g, prof_obj); | 1549 | nvgpu_kfree(g, prof_obj); |
1550 | err = -EINVAL; | 1550 | err = -EINVAL; |
@@ -1582,7 +1582,7 @@ static int nvgpu_ioctl_free_profiler_object( | |||
1582 | dbg_profiler_object_data, prof_obj_entry) { | 1582 | dbg_profiler_object_data, prof_obj_entry) { |
1583 | if (prof_obj->prof_handle == args->profiler_handle) { | 1583 | if (prof_obj->prof_handle == args->profiler_handle) { |
1584 | if (prof_obj->session_id != dbg_s->id) { | 1584 | if (prof_obj->session_id != dbg_s->id) { |
1585 | gk20a_err(dev_from_gk20a(g), | 1585 | nvgpu_err(g, |
1586 | "invalid handle %x", | 1586 | "invalid handle %x", |
1587 | args->profiler_handle); | 1587 | args->profiler_handle); |
1588 | err = -EINVAL; | 1588 | err = -EINVAL; |
@@ -1598,7 +1598,7 @@ static int nvgpu_ioctl_free_profiler_object( | |||
1598 | } | 1598 | } |
1599 | } | 1599 | } |
1600 | if (!obj_found) { | 1600 | if (!obj_found) { |
1601 | gk20a_err(dev_from_gk20a(g), "profiler %x not found", | 1601 | nvgpu_err(g, "profiler %x not found", |
1602 | args->profiler_handle); | 1602 | args->profiler_handle); |
1603 | err = -EINVAL; | 1603 | err = -EINVAL; |
1604 | } | 1604 | } |
@@ -1618,7 +1618,7 @@ static struct dbg_profiler_object_data *find_matching_prof_obj( | |||
1618 | dbg_profiler_object_data, prof_obj_entry) { | 1618 | dbg_profiler_object_data, prof_obj_entry) { |
1619 | if (prof_obj->prof_handle == profiler_handle) { | 1619 | if (prof_obj->prof_handle == profiler_handle) { |
1620 | if (prof_obj->session_id != dbg_s->id) { | 1620 | if (prof_obj->session_id != dbg_s->id) { |
1621 | gk20a_err(dev_from_gk20a(g), | 1621 | nvgpu_err(g, |
1622 | "invalid handle %x", | 1622 | "invalid handle %x", |
1623 | profiler_handle); | 1623 | profiler_handle); |
1624 | return NULL; | 1624 | return NULL; |
@@ -1667,7 +1667,7 @@ static void nvgpu_release_profiler_reservation(struct dbg_session_gk20a *dbg_s, | |||
1667 | 1667 | ||
1668 | g->profiler_reservation_count--; | 1668 | g->profiler_reservation_count--; |
1669 | if (g->profiler_reservation_count < 0) | 1669 | if (g->profiler_reservation_count < 0) |
1670 | gk20a_err(dev_from_gk20a(g), "Negative reservation count!"); | 1670 | nvgpu_err(g, "Negative reservation count!"); |
1671 | dbg_s->has_profiler_reservation = false; | 1671 | dbg_s->has_profiler_reservation = false; |
1672 | prof_obj->has_reservation = false; | 1672 | prof_obj->has_reservation = false; |
1673 | if (prof_obj->ch == NULL) | 1673 | if (prof_obj->ch == NULL) |
@@ -1684,7 +1684,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s, | |||
1684 | gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle); | 1684 | gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle); |
1685 | 1685 | ||
1686 | if (g->profiler_reservation_count < 0) { | 1686 | if (g->profiler_reservation_count < 0) { |
1687 | gk20a_err(dev_from_gk20a(g), "Negative reservation count!"); | 1687 | nvgpu_err(g, "Negative reservation count!"); |
1688 | return -EINVAL; | 1688 | return -EINVAL; |
1689 | } | 1689 | } |
1690 | 1690 | ||
@@ -1694,7 +1694,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s, | |||
1694 | my_prof_obj = find_matching_prof_obj(dbg_s, profiler_handle); | 1694 | my_prof_obj = find_matching_prof_obj(dbg_s, profiler_handle); |
1695 | 1695 | ||
1696 | if (!my_prof_obj) { | 1696 | if (!my_prof_obj) { |
1697 | gk20a_err(dev_from_gk20a(g), "object not found"); | 1697 | nvgpu_err(g, "object not found"); |
1698 | err = -EINVAL; | 1698 | err = -EINVAL; |
1699 | goto exit; | 1699 | goto exit; |
1700 | } | 1700 | } |
@@ -1711,7 +1711,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s, | |||
1711 | */ | 1711 | */ |
1712 | if (!g->ops.dbg_session_ops.check_and_set_global_reservation( | 1712 | if (!g->ops.dbg_session_ops.check_and_set_global_reservation( |
1713 | dbg_s, my_prof_obj)) { | 1713 | dbg_s, my_prof_obj)) { |
1714 | gk20a_err(dev_from_gk20a(g), | 1714 | nvgpu_err(g, |
1715 | "global reserve: have existing reservation"); | 1715 | "global reserve: have existing reservation"); |
1716 | err = -EBUSY; | 1716 | err = -EBUSY; |
1717 | } | 1717 | } |
@@ -1719,7 +1719,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s, | |||
1719 | /* If there's a global reservation, | 1719 | /* If there's a global reservation, |
1720 | * we can't take a per-context one. | 1720 | * we can't take a per-context one. |
1721 | */ | 1721 | */ |
1722 | gk20a_err(dev_from_gk20a(g), | 1722 | nvgpu_err(g, |
1723 | "per-ctxt reserve: global reservation in effect"); | 1723 | "per-ctxt reserve: global reservation in effect"); |
1724 | err = -EBUSY; | 1724 | err = -EBUSY; |
1725 | } else if (gk20a_is_channel_marked_as_tsg(my_prof_obj->ch)) { | 1725 | } else if (gk20a_is_channel_marked_as_tsg(my_prof_obj->ch)) { |
@@ -1732,7 +1732,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s, | |||
1732 | dbg_profiler_object_data, prof_obj_entry) { | 1732 | dbg_profiler_object_data, prof_obj_entry) { |
1733 | if (prof_obj->has_reservation && | 1733 | if (prof_obj->has_reservation && |
1734 | (prof_obj->ch->tsgid == my_tsgid)) { | 1734 | (prof_obj->ch->tsgid == my_tsgid)) { |
1735 | gk20a_err(dev_from_gk20a(g), | 1735 | nvgpu_err(g, |
1736 | "per-ctxt reserve (tsg): already reserved"); | 1736 | "per-ctxt reserve (tsg): already reserved"); |
1737 | err = -EBUSY; | 1737 | err = -EBUSY; |
1738 | goto exit; | 1738 | goto exit; |
@@ -1742,7 +1742,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s, | |||
1742 | if (!g->ops.dbg_session_ops.check_and_set_context_reservation( | 1742 | if (!g->ops.dbg_session_ops.check_and_set_context_reservation( |
1743 | dbg_s, my_prof_obj)) { | 1743 | dbg_s, my_prof_obj)) { |
1744 | /* Another guest OS has the global reservation */ | 1744 | /* Another guest OS has the global reservation */ |
1745 | gk20a_err(dev_from_gk20a(g), | 1745 | nvgpu_err(g, |
1746 | "per-ctxt reserve: global reservation in effect"); | 1746 | "per-ctxt reserve: global reservation in effect"); |
1747 | err = -EBUSY; | 1747 | err = -EBUSY; |
1748 | } | 1748 | } |
@@ -1756,7 +1756,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s, | |||
1756 | dbg_profiler_object_data, prof_obj_entry) { | 1756 | dbg_profiler_object_data, prof_obj_entry) { |
1757 | if (prof_obj->has_reservation && | 1757 | if (prof_obj->has_reservation && |
1758 | (prof_obj->ch == my_ch)) { | 1758 | (prof_obj->ch == my_ch)) { |
1759 | gk20a_err(dev_from_gk20a(g), | 1759 | nvgpu_err(g, |
1760 | "per-ctxt reserve (ch): already reserved"); | 1760 | "per-ctxt reserve (ch): already reserved"); |
1761 | err = -EBUSY; | 1761 | err = -EBUSY; |
1762 | goto exit; | 1762 | goto exit; |
@@ -1766,7 +1766,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s, | |||
1766 | if (!g->ops.dbg_session_ops.check_and_set_context_reservation( | 1766 | if (!g->ops.dbg_session_ops.check_and_set_context_reservation( |
1767 | dbg_s, my_prof_obj)) { | 1767 | dbg_s, my_prof_obj)) { |
1768 | /* Another guest OS has the global reservation */ | 1768 | /* Another guest OS has the global reservation */ |
1769 | gk20a_err(dev_from_gk20a(g), | 1769 | nvgpu_err(g, |
1770 | "per-ctxt reserve: global reservation in effect"); | 1770 | "per-ctxt reserve: global reservation in effect"); |
1771 | err = -EBUSY; | 1771 | err = -EBUSY; |
1772 | } | 1772 | } |
@@ -1791,7 +1791,7 @@ static int nvgpu_profiler_reserve_release(struct dbg_session_gk20a *dbg_s, | |||
1791 | prof_obj = find_matching_prof_obj(dbg_s, profiler_handle); | 1791 | prof_obj = find_matching_prof_obj(dbg_s, profiler_handle); |
1792 | 1792 | ||
1793 | if (!prof_obj) { | 1793 | if (!prof_obj) { |
1794 | gk20a_err(dev_from_gk20a(g), "object not found"); | 1794 | nvgpu_err(g, "object not found"); |
1795 | err = -EINVAL; | 1795 | err = -EINVAL; |
1796 | goto exit; | 1796 | goto exit; |
1797 | } | 1797 | } |
@@ -1799,7 +1799,7 @@ static int nvgpu_profiler_reserve_release(struct dbg_session_gk20a *dbg_s, | |||
1799 | if (prof_obj->has_reservation) | 1799 | if (prof_obj->has_reservation) |
1800 | g->ops.dbg_session_ops.release_profiler_reservation(dbg_s, prof_obj); | 1800 | g->ops.dbg_session_ops.release_profiler_reservation(dbg_s, prof_obj); |
1801 | else { | 1801 | else { |
1802 | gk20a_err(dev_from_gk20a(g), "No reservation found"); | 1802 | nvgpu_err(g, "No reservation found"); |
1803 | err = -EINVAL; | 1803 | err = -EINVAL; |
1804 | goto exit; | 1804 | goto exit; |
1805 | } | 1805 | } |
@@ -1854,7 +1854,7 @@ static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s, | |||
1854 | 1854 | ||
1855 | err = gk20a_busy(g); | 1855 | err = gk20a_busy(g); |
1856 | if (err) { | 1856 | if (err) { |
1857 | gk20a_err(dev_from_gk20a(g), "failed to poweron"); | 1857 | nvgpu_err(g, "failed to poweron"); |
1858 | goto fail_unmap; | 1858 | goto fail_unmap; |
1859 | } | 1859 | } |
1860 | 1860 | ||
@@ -1895,7 +1895,7 @@ static int gk20a_perfbuf_unmap(struct dbg_session_gk20a *dbg_s, | |||
1895 | 1895 | ||
1896 | err = gk20a_busy(g); | 1896 | err = gk20a_busy(g); |
1897 | if (err) { | 1897 | if (err) { |
1898 | gk20a_err(dev_from_gk20a(g), "failed to poweron"); | 1898 | nvgpu_err(g, "failed to poweron"); |
1899 | return err; | 1899 | return err; |
1900 | } | 1900 | } |
1901 | 1901 | ||