path: root/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
author	Terje Bergstrom <tbergstrom@nvidia.com>	2017-03-30 10:44:03 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-04-10 22:04:19 -0400
commit	3ba374a5d94f8c2067731155afaf79f03e6c390c (patch)
tree	d8a2bd0d52b1e8862510aedeb7529944c0b7e28e /drivers/gpu/nvgpu/gk20a/mm_gk20a.c
parent	2be51206af88aba6662cdd9de5bd6c18989bbcbd (diff)
gpu: nvgpu: gk20a: Use new error macro
gk20a_err() and gk20a_warn() require a struct device pointer, which is not
portable across operating systems. The new nvgpu_err() and nvgpu_warn()
macros take a struct gk20a pointer instead. Convert the code to use the more
portable macros.

JIRA NVGPU-16

Change-Id: Ia51f36d94c5ce57a5a0ab83b3c83a6bce09e2d5c
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1331694
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
GVS: Gerrit_Virtual_Submit
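In rough terms, the conversion only swaps the first argument at each logging call site. The sketch below is illustrative, not driver code: the stand-in struct definitions and printf-based macro bodies are invented for demonstration, and only the calling convention mirrors what this patch does.

/* toy_err_macros.c — hypothetical stand-ins for the nvgpu logging macros. */
#include <stdio.h>

struct device { const char *name; };                       /* stand-in */
struct gk20a  { struct device *dev; const char *name; };   /* stand-in */

/* Old style: every caller must produce a Linux struct device pointer. */
#define gk20a_err(d, fmt, ...) \
	fprintf(stderr, "%s: " fmt "\n", (d)->name, ##__VA_ARGS__)

/* New style: callers pass the OS-agnostic struct gk20a pointer they already hold. */
#define nvgpu_err(g, fmt, ...) \
	fprintf(stderr, "%s: " fmt "\n", (g)->name, ##__VA_ARGS__)

int main(void)
{
	struct device dev = { .name = "gk20a-dev" };
	struct gk20a g = { .dev = &dev, .name = "gk20a" };
	int err = -12;

	gk20a_err(g.dev, "failed to clear vidmem: %d", err);  /* before */
	nvgpu_err(&g, "failed to clear vidmem: %d", err);      /* after  */
	return 0;
}

With the new form a caller needs only the struct gk20a handle, so the logging path no longer depends on the Linux device model.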
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mm_gk20a.c	119
1 file changed, 54 insertions, 65 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index ab3dc3f9..78332ee7 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -39,6 +39,7 @@
 #include <nvgpu/allocator.h>
 #include <nvgpu/semaphore.h>
 #include <nvgpu/page_allocator.h>
+#include <nvgpu/log.h>
 
 #include "gk20a.h"
 #include "mm_gk20a.h"
@@ -536,7 +537,7 @@ static int gk20a_vidmem_clear_all(struct gk20a *g)
 			0,
 			NULL);
 	if (err) {
-		gk20a_err(g->dev,
+		nvgpu_err(g,
 			"Failed to clear vidmem region 1 : %d", err);
 		return err;
 	}
@@ -555,7 +556,7 @@ static int gk20a_vidmem_clear_all(struct gk20a *g)
 			0,
 			&gk20a_fence_out);
 	if (err) {
-		gk20a_err(g->dev,
+		nvgpu_err(g,
 			"Failed to clear vidmem region 2 : %d", err);
 		return err;
 	}
@@ -575,7 +576,7 @@ static int gk20a_vidmem_clear_all(struct gk20a *g)
 
 		gk20a_fence_put(gk20a_fence_out);
 		if (err) {
-			gk20a_err(g->dev,
+			nvgpu_err(g,
 				"fence wait failed for CE execute ops");
 			return err;
 		}
@@ -591,7 +592,6 @@ static int gk20a_init_vidmem(struct mm_gk20a *mm)
 {
 #if defined(CONFIG_GK20A_VIDMEM)
 	struct gk20a *g = mm->g;
-	struct device *d = dev_from_gk20a(g);
 	size_t size = g->ops.mm.get_vidmem_size ?
 		g->ops.mm.get_vidmem_size(g) : 0;
 	u64 bootstrap_base, bootstrap_size, base;
@@ -625,7 +625,7 @@ static int gk20a_init_vidmem(struct mm_gk20a *mm)
 			default_page_size,
 			GPU_ALLOC_4K_VIDMEM_PAGES);
 	if (err) {
-		gk20a_err(d, "Failed to register vidmem for size %zu: %d",
+		nvgpu_err(g, "Failed to register vidmem for size %zu: %d",
 				size, err);
 		return err;
 	}
@@ -796,7 +796,7 @@ void gk20a_init_mm_ce_context(struct gk20a *g)
 				NULL);
 
 		if (g->mm.vidmem.ce_ctx_id == (u32)~0)
-			gk20a_err(g->dev,
+			nvgpu_err(g,
 				"Failed to allocate CE context for vidmem page clearing support");
 	}
 #endif
@@ -882,7 +882,6 @@ static void unmap_gmmu_phys_pages(struct gk20a_mm_entry *entry)
 static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
 			    struct gk20a_mm_entry *entry)
 {
-	struct device *d = dev_from_vm(vm);
 	struct gk20a *g = gk20a_from_vm(vm);
 	u32 num_pages = 1 << order;
 	u32 len = num_pages * PAGE_SIZE;
@@ -905,7 +904,7 @@ static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
 
 
 	if (err) {
-		gk20a_err(d, "memory allocation failed");
+		nvgpu_err(g, "memory allocation failed");
 		return -ENOMEM;
 	}
 
@@ -1209,7 +1208,7 @@ void gk20a_vm_put_buffers(struct vm_gk20a *vm,
 static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
 				struct vm_gk20a_mapping_batch *batch)
 {
-	struct device *d = dev_from_vm(vm);
+	struct gk20a *g = vm->mm->g;
 	struct mapped_buffer_node *mapped_buffer;
 
 	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
@@ -1217,7 +1216,7 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
 	mapped_buffer = find_mapped_buffer_locked(vm->mapped_buffers, offset);
 	if (!mapped_buffer) {
 		nvgpu_mutex_release(&vm->update_gmmu_lock);
-		gk20a_err(d, "invalid addr to unmap 0x%llx", offset);
+		nvgpu_err(g, "invalid addr to unmap 0x%llx", offset);
 		return;
 	}
 
@@ -1240,7 +1239,7 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
 
 	if (mapped_buffer->user_mapped == 0) {
 		nvgpu_mutex_release(&vm->update_gmmu_lock);
-		gk20a_err(d, "addr already unmapped from user 0x%llx", offset);
+		nvgpu_err(g, "addr already unmapped from user 0x%llx", offset);
 		return;
 	}
 
@@ -1284,7 +1283,7 @@ u64 gk20a_vm_alloc_va(struct vm_gk20a *vm,
 
 	offset = nvgpu_alloc(vma, size);
 	if (!offset) {
-		gk20a_err(dev_from_vm(vm),
+		nvgpu_err(vm->mm->g,
 			"%s oom: sz=0x%llx", vma->name, size);
 		return 0;
 	}
@@ -1405,14 +1404,13 @@ static int setup_buffer_kind_and_compression(struct vm_gk20a *vm,
 {
 	bool kind_compressible;
 	struct gk20a *g = gk20a_from_vm(vm);
-	struct device *d = dev_from_gk20a(g);
 	int ctag_granularity = g->ops.fb.compression_page_size(g);
 
 	if (unlikely(bfr->kind_v == gmmu_pte_kind_invalid_v()))
 		bfr->kind_v = gmmu_pte_kind_pitch_v();
 
 	if (unlikely(!gk20a_kind_is_supported(bfr->kind_v))) {
-		gk20a_err(d, "kind 0x%x not supported", bfr->kind_v);
+		nvgpu_err(g, "kind 0x%x not supported", bfr->kind_v);
 		return -EINVAL;
 	}
 
@@ -1423,7 +1421,7 @@ static int setup_buffer_kind_and_compression(struct vm_gk20a *vm,
 		bfr->uc_kind_v = gk20a_get_uncompressed_kind(bfr->kind_v);
 		if (unlikely(bfr->uc_kind_v == gmmu_pte_kind_invalid_v())) {
 			/* shouldn't happen, but it is worth cross-checking */
-			gk20a_err(d, "comptag kind 0x%x can't be"
+			nvgpu_err(g, "comptag kind 0x%x can't be"
 				" downgraded to uncompressed kind",
 				bfr->kind_v);
 			return -EINVAL;
@@ -1432,9 +1430,6 @@ static int setup_buffer_kind_and_compression(struct vm_gk20a *vm,
 	/* comptags only supported for suitable kinds, 128KB pagesize */
 	if (kind_compressible &&
 	    vm->gmmu_page_sizes[pgsz_idx] < g->ops.fb.compressible_page_size(g)) {
-		/*
-		gk20a_warn(d, "comptags specified"
-		" but pagesize being used doesn't support it");*/
 		/* it is safe to fall back to uncompressed as
 		   functionality is not harmed */
 		bfr->kind_v = bfr->uc_kind_v;
@@ -1453,19 +1448,19 @@ static int validate_fixed_buffer(struct vm_gk20a *vm,
 			u64 map_offset, u64 map_size,
 			struct vm_reserved_va_node **pva_node)
 {
-	struct device *dev = dev_from_vm(vm);
+	struct gk20a *g = vm->mm->g;
 	struct vm_reserved_va_node *va_node;
 	struct mapped_buffer_node *buffer;
 	u64 map_end = map_offset + map_size;
 
 	/* can wrap around with insane map_size; zero is disallowed too */
 	if (map_end <= map_offset) {
-		gk20a_warn(dev, "fixed offset mapping with invalid map_size");
+		nvgpu_warn(g, "fixed offset mapping with invalid map_size");
 		return -EINVAL;
 	}
 
 	if (map_offset & (vm->gmmu_page_sizes[bfr->pgsz_idx] - 1)) {
-		gk20a_err(dev, "map offset must be buffer page size aligned 0x%llx",
+		nvgpu_err(g, "map offset must be buffer page size aligned 0x%llx",
 			map_offset);
 		return -EINVAL;
 	}
@@ -1474,13 +1469,13 @@ static int validate_fixed_buffer(struct vm_gk20a *vm,
 	 * userspace-managed address spaces */
 	va_node = addr_to_reservation(vm, map_offset);
 	if (!va_node && !vm->userspace_managed) {
-		gk20a_warn(dev, "fixed offset mapping without space allocation");
+		nvgpu_warn(g, "fixed offset mapping without space allocation");
 		return -EINVAL;
 	}
 
 	/* Mapped area should fit inside va, if there's one */
 	if (va_node && map_end > va_node->vaddr_start + va_node->size) {
-		gk20a_warn(dev, "fixed offset mapping size overflows va node");
+		nvgpu_warn(g, "fixed offset mapping size overflows va node");
 		return -EINVAL;
 	}
 
@@ -1490,7 +1485,7 @@ static int validate_fixed_buffer(struct vm_gk20a *vm,
 	buffer = find_mapped_buffer_less_than_locked(
 			vm->mapped_buffers, map_offset + map_size);
 	if (buffer && buffer->addr + buffer->size > map_offset) {
-		gk20a_warn(dev, "overlapping buffer map requested");
+		nvgpu_warn(g, "overlapping buffer map requested");
 		return -EINVAL;
 	}
 
@@ -1517,7 +1512,6 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 {
 	int err = 0;
 	bool allocated = false;
-	struct device *d = dev_from_vm(vm);
 	struct gk20a *g = gk20a_from_vm(vm);
 	int ctag_granularity = g->ops.fb.compression_page_size(g);
 	u32 ctag_lines = DIV_ROUND_UP_ULL(size, ctag_granularity);
@@ -1527,7 +1521,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 		map_offset = gk20a_vm_alloc_va(vm, size,
 					pgsz_idx);
 		if (!map_offset) {
-			gk20a_err(d, "failed to allocate va space");
+			nvgpu_err(g, "failed to allocate va space");
 			err = -ENOMEM;
 			goto fail_alloc;
 		}
@@ -1563,7 +1557,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 				priv,
 				aperture);
 	if (err) {
-		gk20a_err(d, "failed to update ptes on map");
+		nvgpu_err(g, "failed to update ptes on map");
 		goto fail_validate;
 	}
 
@@ -1577,7 +1571,7 @@ fail_validate:
 	if (allocated)
 		gk20a_vm_free_va(vm, map_offset, size, pgsz_idx);
 fail_alloc:
-	gk20a_err(d, "%s: failed with err=%d\n", __func__, err);
+	nvgpu_err(g, "%s: failed with err=%d\n", __func__, err);
 	return 0;
 }
 
@@ -1596,8 +1590,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
 	if (va_allocated) {
 		err = gk20a_vm_free_va(vm, vaddr, size, pgsz_idx);
 		if (err) {
-			dev_err(dev_from_vm(vm),
-				"failed to free va");
+			nvgpu_err(g, "failed to free va");
 			return;
 		}
 	}
@@ -1614,8 +1607,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
 				sparse, 0,
 				APERTURE_INVALID); /* don't care for unmap */
 	if (err)
-		dev_err(dev_from_vm(vm),
-			"failed to update gmmu ptes on unmap");
+		nvgpu_err(g, "failed to update gmmu ptes on unmap");
 
 	/* flush l2 so any dirty lines are written out *now*.
 	 * also as we could potentially be switching this buffer
@@ -1647,7 +1639,7 @@ static enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g,
 	} else if (WARN_ON(buf_owner == g && !g->mm.vidmem_is_vidmem)) {
 		/* Looks like our video memory, but this gpu doesn't support
 		 * it. Warn about a bug and bail out */
-		gk20a_warn(dev_from_gk20a(g),
+		nvgpu_warn(g,
 			"dmabuf is our vidmem but we don't have local vidmem");
 		return APERTURE_INVALID;
 	} else if (buf_owner != g) {
@@ -1860,7 +1852,7 @@ int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
 	if (!g->mm.vidmem.cleared) {
 		err = gk20a_vidmem_clear_all(g);
 		if (err) {
-			gk20a_err(g->dev,
+			nvgpu_err(g,
 				"failed to clear whole vidmem");
 			goto err_kfree;
 		}
@@ -2037,7 +2029,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
 
 	if (user_mapped && vm->userspace_managed &&
 	    !(flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)) {
-		gk20a_err(d,
+		nvgpu_err(g,
 			"%s: non-fixed-offset mapping not available on userspace managed address spaces",
 			__func__);
 		return -EFAULT;
@@ -2068,7 +2060,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
 			 * track the difference between those two cases we have
 			 * to fail the mapping when we run out of SMMU space.
 			 */
-			gk20a_warn(d, "oom allocating tracking buffer");
+			nvgpu_warn(g, "oom allocating tracking buffer");
 			goto clean_up;
 		}
 
@@ -2111,7 +2103,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
 
 	err = setup_buffer_kind_and_compression(vm, flags, &bfr, bfr.pgsz_idx);
 	if (unlikely(err)) {
-		gk20a_err(d, "failure setting up kind and compression");
+		nvgpu_err(g, "failure setting up kind and compression");
 		goto clean_up;
 	}
 
@@ -2204,7 +2196,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
 	/* TBD: check for multiple mapping of same buffer */
 	mapped_buffer = nvgpu_kzalloc(g, sizeof(*mapped_buffer));
 	if (!mapped_buffer) {
-		gk20a_warn(d, "oom allocating tracking buffer");
+		nvgpu_warn(g, "oom allocating tracking buffer");
 		goto clean_up;
 	}
 	mapped_buffer->dmabuf = dmabuf;
@@ -2230,7 +2222,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
 
 	err = insert_mapped_buffer(vm, mapped_buffer);
 	if (err) {
-		gk20a_err(d, "failed to insert into mapped buffer tree");
+		nvgpu_err(g, "failed to insert into mapped buffer tree");
 		goto clean_up;
 	}
 	inserted = true;
@@ -2274,7 +2266,7 @@ int gk20a_vm_get_compbits_info(struct vm_gk20a *vm,
 			       u32 *flags)
 {
 	struct mapped_buffer_node *mapped_buffer;
-	struct device *d = dev_from_vm(vm);
+	struct gk20a *g = vm->mm->g;
 
 	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 
@@ -2283,7 +2275,7 @@ int gk20a_vm_get_compbits_info(struct vm_gk20a *vm,
 	if (!mapped_buffer || !mapped_buffer->user_mapped)
 	{
 		nvgpu_mutex_release(&vm->update_gmmu_lock);
-		gk20a_err(d, "%s: bad offset 0x%llx", __func__, mapping_gva);
+		nvgpu_err(g, "%s: bad offset 0x%llx", __func__, mapping_gva);
 		return -EFAULT;
 	}
 
@@ -2316,19 +2308,18 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
 {
 	struct mapped_buffer_node *mapped_buffer;
 	struct gk20a *g = gk20a_from_vm(vm);
-	struct device *d = dev_from_vm(vm);
 	const bool fixed_mapping =
 		(flags & NVGPU_AS_MAP_BUFFER_COMPBITS_FLAGS_FIXED_OFFSET) != 0;
 
 	if (vm->userspace_managed && !fixed_mapping) {
-		gk20a_err(d,
+		nvgpu_err(g,
 			"%s: non-fixed-offset mapping is not available on userspace managed address spaces",
 			__func__);
 		return -EFAULT;
 	}
 
 	if (fixed_mapping && !vm->userspace_managed) {
-		gk20a_err(d,
+		nvgpu_err(g,
 			"%s: fixed-offset mapping is available only on userspace managed address spaces",
 			__func__);
 		return -EFAULT;
@@ -2341,13 +2332,13 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
 
 	if (!mapped_buffer || !mapped_buffer->user_mapped) {
 		nvgpu_mutex_release(&vm->update_gmmu_lock);
-		gk20a_err(d, "%s: bad offset 0x%llx", __func__, mapping_gva);
+		nvgpu_err(g, "%s: bad offset 0x%llx", __func__, mapping_gva);
 		return -EFAULT;
 	}
 
 	if (!mapped_buffer->ctags_mappable) {
 		nvgpu_mutex_release(&vm->update_gmmu_lock);
-		gk20a_err(d, "%s: comptags not mappable, offset 0x%llx",
+		nvgpu_err(g, "%s: comptags not mappable, offset 0x%llx",
 			__func__, mapping_gva);
 		return -EFAULT;
 	}
@@ -2366,7 +2357,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
 
 	if (!mapped_buffer->ctag_map_win_size) {
 		nvgpu_mutex_release(&vm->update_gmmu_lock);
-		gk20a_err(d,
+		nvgpu_err(g,
 			"%s: mapping 0x%llx does not have "
 			"mappable comptags",
 			__func__, mapping_gva);
@@ -2402,7 +2393,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
 			 * before before the buffer is
 			 * unmapped */
 			nvgpu_mutex_release(&vm->update_gmmu_lock);
-			gk20a_err(d,
+			nvgpu_err(g,
 				"%s: comptags cannot be mapped into allocated space",
 				__func__);
 			return -EINVAL;
@@ -2429,7 +2420,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
 
 		if (!mapped_buffer->ctag_map_win_addr) {
 			nvgpu_mutex_release(&vm->update_gmmu_lock);
-			gk20a_err(d,
+			nvgpu_err(g,
 				"%s: failed to map comptags for mapping 0x%llx",
 				__func__, mapping_gva);
 			return -ENOMEM;
@@ -2437,7 +2428,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
 	} else if (fixed_mapping && *compbits_win_gva &&
 		   mapped_buffer->ctag_map_win_addr != *compbits_win_gva) {
 		nvgpu_mutex_release(&vm->update_gmmu_lock);
-		gk20a_err(d,
+		nvgpu_err(g,
 			"%s: re-requesting comptags map into mismatching address. buffer offset 0x"
 			"%llx, existing comptag map at 0x%llx, requested remap 0x%llx",
 			__func__, mapping_gva,
@@ -2486,7 +2477,7 @@ static u64 __gk20a_gmmu_map(struct vm_gk20a *vm,
 				aperture);
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
 	if (!vaddr) {
-		gk20a_err(dev_from_vm(vm), "failed to allocate va space");
+		nvgpu_err(g, "failed to allocate va space");
 		return 0;
 	}
 
@@ -2553,7 +2544,7 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
 				&gk20a_fence_out);
 
 		if (err) {
-			gk20a_err(g->dev,
+			nvgpu_err(g,
 				"Failed gk20a_ce_execute_ops[%d]", err);
 			return err;
 		}
@@ -2576,7 +2567,7 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
 
 		gk20a_fence_put(gk20a_last_fence);
 		if (err)
-			gk20a_err(g->dev,
+			nvgpu_err(g,
 				"fence wait failed for CE execute ops");
 	}
 
@@ -2692,7 +2683,7 @@ int gk20a_get_sgtable(struct device *d, struct sg_table **sgt,
 	int err = 0;
 	*sgt = nvgpu_kzalloc(g, sizeof(struct sg_table));
 	if (!(*sgt)) {
-		dev_err(d, "failed to allocate memory\n");
+		nvgpu_err(g, "failed to allocate memory\n");
 		err = -ENOMEM;
 		goto fail;
 	}
@@ -2700,7 +2691,7 @@ int gk20a_get_sgtable(struct device *d, struct sg_table **sgt,
 				cpuva, iova,
 				size);
 	if (err) {
-		dev_err(d, "failed to create sg table\n");
+		nvgpu_err(g, "failed to create sg table\n");
 		goto fail;
 	}
 	sg_dma_address((*sgt)->sgl) = iova;
@@ -2723,14 +2714,14 @@ int gk20a_get_sgtable_from_pages(struct device *d, struct sg_table **sgt,
 
 	*sgt = nvgpu_kzalloc(g, sizeof(struct sg_table));
 	if (!(*sgt)) {
-		dev_err(d, "failed to allocate memory\n");
+		nvgpu_err(g, "failed to allocate memory\n");
 		err = -ENOMEM;
 		goto fail;
 	}
 	err = sg_alloc_table_from_pages(*sgt, pages,
 			DIV_ROUND_UP(size, PAGE_SIZE), 0, size, GFP_KERNEL);
 	if (err) {
-		dev_err(d, "failed to allocate sg_table\n");
+		nvgpu_err(g, "failed to allocate sg_table\n");
 		goto fail;
 	}
 	sg_dma_address((*sgt)->sgl) = iova;
@@ -3049,7 +3040,7 @@ static int update_gmmu_level_locked(struct vm_gk20a *vm,
 		/* get cpu access to the ptes */
 		err = map_gmmu_pages(g, next_pte);
 		if (err) {
-			gk20a_err(dev_from_vm(vm),
+			nvgpu_err(g,
 				"couldn't map ptes for update as=%d",
 				vm_aspace_id(vm));
 			return err;
@@ -3113,7 +3104,7 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
 
 	err = map_gmmu_pages(g, &vm->pdb);
 	if (err) {
-		gk20a_err(dev_from_vm(vm),
+		nvgpu_err(g,
 			"couldn't map ptes for update as=%d",
 			vm_aspace_id(vm));
 		return err;
@@ -3284,14 +3275,14 @@ void gk20a_vm_unmap_locked(struct mapped_buffer_node *mapped_buffer,
 
 void gk20a_vm_unmap(struct vm_gk20a *vm, u64 offset)
 {
-	struct device *d = dev_from_vm(vm);
+	struct gk20a *g = vm->mm->g;
 	struct mapped_buffer_node *mapped_buffer;
 
 	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 	mapped_buffer = find_mapped_buffer_locked(vm->mapped_buffers, offset);
 	if (!mapped_buffer) {
 		nvgpu_mutex_release(&vm->update_gmmu_lock);
-		gk20a_err(d, "invalid addr to unmap 0x%llx", offset);
+		nvgpu_err(g, "invalid addr to unmap 0x%llx", offset);
 		return;
 	}
 
@@ -4195,14 +4186,13 @@ void gk20a_deinit_vm(struct vm_gk20a *vm)
 
 int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
 {
-	struct device *dev = dev_from_gk20a(g);
 	int err;
 
 	gk20a_dbg_fn("");
 
 	err = nvgpu_dma_alloc(g, ram_in_alloc_size_v(), inst_block);
 	if (err) {
-		gk20a_err(dev, "%s: memory allocation failed\n", __func__);
+		nvgpu_err(g, "%s: memory allocation failed\n", __func__);
 		return err;
 	}
 
@@ -4462,8 +4452,7 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
 	} while (!nvgpu_timeout_expired(&timeout));
 
 	if (nvgpu_timeout_peek_expired(&timeout))
-		gk20a_warn(dev_from_gk20a(g),
-			   "l2_system_invalidate too many retries");
+		nvgpu_warn(g, "l2_system_invalidate too many retries");
 
 	trace_gk20a_mm_l2_invalidate_done(g->name);
 }