Diffstat (limited to 'drivers')
 drivers/gpu/drm/radeon/ni.c          |   2
 drivers/gpu/drm/radeon/radeon.h      |  11
 drivers/gpu/drm/radeon/radeon_gart.c | 322
 3 files changed, 262 insertions(+), 73 deletions(-)
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 83dc0852d5c9..ab8d1f5fe68a 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1580,7 +1580,7 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 	radeon_ring_write(ring, 0);
 
 	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0));
-	radeon_ring_write(ring, vm->last_pfn);
+	radeon_ring_write(ring, rdev->vm_manager.max_pfn);
 
 	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
 	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b04c06444d8b..bc6b56bf274a 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -663,9 +663,14 @@ struct radeon_vm {
 	struct list_head		list;
 	struct list_head		va;
 	unsigned			id;
-	unsigned			last_pfn;
-	u64				pd_gpu_addr;
-	struct radeon_sa_bo		*sa_bo;
+
+	/* contains the page directory */
+	struct radeon_sa_bo		*page_directory;
+	uint64_t			pd_gpu_addr;
+
+	/* array of page tables, one for each page directory entry */
+	struct radeon_sa_bo		**page_tables;
+
 	struct mutex			mutex;
 	/* last fence for cs using this vm */
 	struct radeon_fence		*fence;
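
For orientation, the sizing math behind the new page_directory/page_tables fields can be worked through in isolation. A minimal sketch, assuming illustrative stand-in values for the constants (the driver's real RADEON_GPU_PAGE_SIZE and RADEON_VM_BLOCK_SIZE live in radeon.h); only the arithmetic mirrors radeon_vm_num_pdes() and radeon_vm_directory_size() from the patch below:

#include <stdio.h>

/* Illustrative stand-ins, NOT the driver's verbatim values. */
#define GPU_PAGE_SIZE   4096u                  /* one GPU page */
#define VM_BLOCK_SIZE   9u                     /* pfns covered per PDE: 2^9 */
#define PTE_COUNT       (1u << VM_BLOCK_SIZE)  /* PTEs per page table */
#define GPU_PAGE_ALIGN(x) (((x) + GPU_PAGE_SIZE - 1) & ~(GPU_PAGE_SIZE - 1))

int main(void)
{
	unsigned max_pfn = 1u << 18;   /* e.g. a 1 GiB VM space in 4K pages */

	/* one 8-byte PDE per block of PTE_COUNT pages */
	unsigned num_pdes = max_pfn >> VM_BLOCK_SIZE;
	unsigned dir_size = GPU_PAGE_ALIGN(num_pdes * 8);

	/* each page table holds PTE_COUNT 8-byte PTEs */
	unsigned pt_size = PTE_COUNT * 8;

	printf("PDEs: %u, directory: %u bytes, one page table: %u bytes\n",
	       num_pdes, dir_size, pt_size);
	return 0;
}

With these numbers the 512-entry directory fits in a single GPU page, and each page table is itself exactly one page, which is why the SA allocations below use RADEON_GPU_PAGE_SIZE alignment.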
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index f0c06d196b75..98b170a0df90 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -423,6 +423,18 @@ void radeon_gart_fini(struct radeon_device *rdev)
  */
 
 /**
+ * radeon_vm_num_pdes - return the number of page directory entries
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate the number of page directory entries (cayman+).
+ */
+static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
+{
+	return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
+}
+
+/**
  * radeon_vm_directory_size - returns the size of the page directory in bytes
  *
  * @rdev: radeon_device pointer
@@ -431,7 +443,7 @@ void radeon_gart_fini(struct radeon_device *rdev)
  */
 static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
 {
-	return (rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE) * 8;
+	return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
 }
 
 /**
@@ -451,11 +463,11 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
 
 	if (!rdev->vm_manager.enabled) {
 		/* allocate enough for 2 full VM pts */
-		size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
-		size += RADEON_GPU_PAGE_ALIGN(rdev->vm_manager.max_pfn * 8);
+		size = radeon_vm_directory_size(rdev);
+		size += rdev->vm_manager.max_pfn * 8;
 		size *= 2;
 		r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
-					      size,
+					      RADEON_GPU_PAGE_ALIGN(size),
 					      RADEON_GEM_DOMAIN_VRAM);
 		if (r) {
 			dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -476,7 +488,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
 
 	/* restore page table */
 	list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
-		if (vm->sa_bo == NULL)
+		if (vm->page_directory == NULL)
 			continue;
 
 		list_for_each_entry(bo_va, &vm->va, vm_list) {
@@ -500,16 +512,25 @@ static void radeon_vm_free_pt(struct radeon_device *rdev,
 			      struct radeon_vm *vm)
 {
 	struct radeon_bo_va *bo_va;
+	int i;
 
-	if (!vm->sa_bo)
+	if (!vm->page_directory)
 		return;
 
 	list_del_init(&vm->list);
-	radeon_sa_bo_free(rdev, &vm->sa_bo, vm->fence);
+	radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
 
 	list_for_each_entry(bo_va, &vm->va, vm_list) {
 		bo_va->valid = false;
 	}
+
+	if (vm->page_tables == NULL)
+		return;
+
+	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
+		radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);
+
+	kfree(vm->page_tables);
 }
 
 /**
@@ -546,6 +567,35 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
 }
 
 /**
+ * radeon_vm_evict - evict page table to make room for new one
+ *
+ * @rdev: radeon_device pointer
+ * @vm: VM we want to allocate something for
+ *
+ * Evict a VM from the lru, making sure that it isn't @vm (cayman+).
+ * Returns 0 for success, -ENOMEM for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+	struct radeon_vm *vm_evict;
+
+	if (list_empty(&rdev->vm_manager.lru_vm))
+		return -ENOMEM;
+
+	vm_evict = list_first_entry(&rdev->vm_manager.lru_vm,
+				    struct radeon_vm, list);
+	if (vm_evict == vm)
+		return -ENOMEM;
+
+	mutex_lock(&vm_evict->mutex);
+	radeon_vm_free_pt(rdev, vm_evict);
+	mutex_unlock(&vm_evict->mutex);
+	return 0;
+}
+
+/**
  * radeon_vm_alloc_pt - allocates a page table for a VM
  *
  * @rdev: radeon_device pointer
@@ -559,20 +609,15 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
  */
 int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
 {
-	struct radeon_vm *vm_evict;
-	int r;
+	unsigned pd_size, pts_size;
 	u64 *pd_addr;
-	int tables_size;
+	int r;
 
 	if (vm == NULL) {
 		return -EINVAL;
 	}
 
-	/* allocate enough to cover the current VM size */
-	tables_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
-	tables_size += RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8);
-
-	if (vm->sa_bo != NULL) {
+	if (vm->page_directory != NULL) {
 		/* update lru */
 		list_del_init(&vm->list);
 		list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
@@ -580,25 +625,34 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
 	}
 
 retry:
-	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
-			     tables_size, RADEON_GPU_PAGE_SIZE, false);
+	pd_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
+	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
+			     &vm->page_directory, pd_size,
+			     RADEON_GPU_PAGE_SIZE, false);
 	if (r == -ENOMEM) {
-		if (list_empty(&rdev->vm_manager.lru_vm)) {
+		r = radeon_vm_evict(rdev, vm);
+		if (r)
 			return r;
-		}
-		vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
-		mutex_lock(&vm_evict->mutex);
-		radeon_vm_free_pt(rdev, vm_evict);
-		mutex_unlock(&vm_evict->mutex);
 		goto retry;
 
 	} else if (r) {
 		return r;
 	}
 
-	pd_addr = radeon_sa_bo_cpu_addr(vm->sa_bo);
-	vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
-	memset(pd_addr, 0, tables_size);
+	vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
+
+	/* Initially clear the page directory */
+	pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory);
+	memset(pd_addr, 0, pd_size);
+
+	pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
+	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
+
+	if (vm->page_tables == NULL) {
+		DRM_ERROR("Cannot allocate memory for page table array\n");
+		radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
+		return -ENOMEM;
+	}
 
 	list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
 	return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
@@ -793,20 +847,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
 	}
 
 	mutex_lock(&vm->mutex);
-	if (last_pfn > vm->last_pfn) {
-		/* release mutex and lock in right order */
-		mutex_unlock(&vm->mutex);
-		mutex_lock(&rdev->vm_manager.lock);
-		mutex_lock(&vm->mutex);
-		/* and check again */
-		if (last_pfn > vm->last_pfn) {
-			/* grow va space 32M by 32M */
-			unsigned align = ((32 << 20) >> 12) - 1;
-			radeon_vm_free_pt(rdev, vm);
-			vm->last_pfn = (last_pfn + align) & ~align;
-		}
-		mutex_unlock(&rdev->vm_manager.lock);
-	}
 	head = &vm->va;
 	last_offset = 0;
 	list_for_each_entry(tmp, &vm->va, vm_list) {
@@ -865,6 +905,155 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
 }
 
 /**
+ * radeon_vm_update_pdes - make sure that page directory is valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory (cayman+).
+ * Returns 0 for success, error for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+static int radeon_vm_update_pdes(struct radeon_device *rdev,
+				 struct radeon_vm *vm,
+				 uint64_t start, uint64_t end)
+{
+	static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
+
+	uint64_t last_pde = ~0, last_pt = ~0;
+	unsigned count = 0;
+	uint64_t pt_idx;
+	int r;
+
+	start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
+	end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
+
+	/* walk over the address space and update the page directory */
+	for (pt_idx = start; pt_idx <= end; ++pt_idx) {
+		uint64_t pde, pt;
+
+		if (vm->page_tables[pt_idx])
+			continue;
+
+retry:
+		r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
+				     &vm->page_tables[pt_idx],
+				     RADEON_VM_PTE_COUNT * 8,
+				     RADEON_GPU_PAGE_SIZE, false);
+
+		if (r == -ENOMEM) {
+			r = radeon_vm_evict(rdev, vm);
+			if (r)
+				return r;
+			goto retry;
+		} else if (r) {
+			return r;
+		}
+
+		pde = vm->pd_gpu_addr + pt_idx * 8;
+
+		pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
+
+		if (((last_pde + 8 * count) != pde) ||
+		    ((last_pt + incr * count) != pt)) {
+
+			if (count) {
+				radeon_asic_vm_set_page(rdev, last_pde,
+							last_pt, count, incr,
+							RADEON_VM_PAGE_VALID);
+			}
+
+			count = 1;
+			last_pde = pde;
+			last_pt = pt;
+		} else {
+			++count;
+		}
+	}
+
+	if (count) {
+		radeon_asic_vm_set_page(rdev, last_pde, last_pt, count,
+					incr, RADEON_VM_PAGE_VALID);
+
+	}
+
+	return 0;
+}
+
+/**
+ * radeon_vm_update_ptes - make sure that page tables are valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @dst: destination address to map to
+ * @flags: mapping flags
+ *
+ * Update the page tables in the range @start - @end (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+static void radeon_vm_update_ptes(struct radeon_device *rdev,
+				  struct radeon_vm *vm,
+				  uint64_t start, uint64_t end,
+				  uint64_t dst, uint32_t flags)
+{
+	static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;
+
+	uint64_t last_pte = ~0, last_dst = ~0;
+	unsigned count = 0;
+	uint64_t addr;
+
+	start = start / RADEON_GPU_PAGE_SIZE;
+	end = end / RADEON_GPU_PAGE_SIZE;
+
+	/* walk over the address space and update the page tables */
+	for (addr = start; addr < end; ) {
+		uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
+		unsigned nptes;
+		uint64_t pte;
+
+		if ((addr & ~mask) == (end & ~mask))
+			nptes = end - addr;
+		else
+			nptes = RADEON_VM_PTE_COUNT - (addr & mask);
+
+		pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
+		pte += (addr & mask) * 8;
+
+		if (((last_pte + 8 * count) != pte) ||
+		    ((count + nptes) > 1 << 11)) {
+
+			if (count) {
+				radeon_asic_vm_set_page(rdev, last_pte,
+							last_dst, count,
+							RADEON_GPU_PAGE_SIZE,
+							flags);
+			}
+
+			count = nptes;
+			last_pte = pte;
+			last_dst = dst;
+		} else {
+			count += nptes;
+		}
+
+		addr += nptes;
+		dst += nptes * RADEON_GPU_PAGE_SIZE;
+	}
+
+	if (count) {
+		radeon_asic_vm_set_page(rdev, last_pte, last_dst, count,
+					RADEON_GPU_PAGE_SIZE, flags);
+	}
+}
+
+/**
  * radeon_vm_bo_update_pte - map a bo into the vm page table
  *
  * @rdev: radeon_device pointer
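
The index arithmetic shared by radeon_vm_update_pdes() and radeon_vm_update_ptes() above reduces to splitting a page frame number at RADEON_VM_BLOCK_SIZE. A standalone sketch, with the same illustrative stand-in constants as before (not the driver's verbatim values):

#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SIZE 4096u
#define VM_BLOCK_SIZE 9u
#define PTE_COUNT     (1u << VM_BLOCK_SIZE)

int main(void)
{
	uint64_t va = 0x00301000;                  /* some GPU virtual address */
	uint64_t pfn = va / GPU_PAGE_SIZE;         /* page frame number */

	uint64_t pt_idx  = pfn >> VM_BLOCK_SIZE;   /* which page table (PDE index) */
	uint64_t pte_idx = pfn & (PTE_COUNT - 1);  /* entry inside that table */

	/* radeon_vm_update_pdes() walks pt_idx over [start, end] and writes one
	 * 8-byte PDE per table; radeon_vm_update_ptes() then writes the 8-byte
	 * PTE at offset pte_idx * 8 inside the table. */
	printf("va 0x%llx -> pt_idx %llu, pte_idx %llu\n",
	       (unsigned long long)va, (unsigned long long)pt_idx,
	       (unsigned long long)pte_idx);
	return 0;
}

For va = 0x301000 this gives pfn 769, page table 1, entry 257, matching the `addr >> RADEON_VM_BLOCK_SIZE` and `(addr & mask) * 8` expressions in the loop above.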
@@ -887,12 +1076,11 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 	struct radeon_semaphore *sem = NULL;
 	struct radeon_bo_va *bo_va;
 	unsigned nptes, npdes, ndw;
-	uint64_t pe, addr;
-	uint64_t pfn;
+	uint64_t addr;
 	int r;
 
 	/* nothing to do if vm isn't bound */
-	if (vm->sa_bo == NULL)
+	if (vm->page_directory == NULL)
 		return 0;
 
 	bo_va = radeon_vm_bo_find(vm, bo);
@@ -939,25 +1127,29 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 		}
 	}
 
-	/* estimate number of dw needed */
-	/* reserve space for 32-bit padding */
-	ndw = 32;
-
 	nptes = radeon_bo_ngpu_pages(bo);
 
-	pfn = (bo_va->soffset / RADEON_GPU_PAGE_SIZE);
+	/* assume two extra pdes in case the mapping overlaps the borders */
+	npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
+
+	/* estimate number of dw needed */
+	/* semaphore, fence and padding */
+	ndw = 32;
 
-	/* handle cases where a bo spans several pdes */
-	npdes = (ALIGN(pfn + nptes, RADEON_VM_PTE_COUNT) -
-		 (pfn & ~(RADEON_VM_PTE_COUNT - 1))) >> RADEON_VM_BLOCK_SIZE;
+	if (RADEON_VM_BLOCK_SIZE > 11)
+		/* reserve space for one header for every 2k dwords */
+		ndw += (nptes >> 11) * 3;
+	else
+		/* reserve space for one header for
+		    every (1 << BLOCK_SIZE) entries */
+		ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 3;
 
-	/* reserve space for one header for every 2k dwords */
-	ndw += (nptes >> 11) * 3;
 	/* reserve space for pte addresses */
 	ndw += nptes * 2;
 
 	/* reserve space for one header for every 2k dwords */
 	ndw += (npdes >> 11) * 3;
+
 	/* reserve space for pde addresses */
 	ndw += npdes * 2;
 
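
As a worked instance of the ring-space estimate in this hunk (assuming a block size of 9, so the RADEON_VM_BLOCK_SIZE <= 11 branch applies; the BO size is made up for illustration):

#include <stdio.h>

#define VM_BLOCK_SIZE 9u   /* illustrative; must be <= 11 for this branch */

int main(void)
{
	unsigned nptes = 1024;                         /* a 4 MiB BO in 4K pages */
	unsigned npdes = (nptes >> VM_BLOCK_SIZE) + 2; /* +2 for border overlap */

	unsigned ndw = 32;                             /* semaphore, fence, padding */
	ndw += (nptes >> VM_BLOCK_SIZE) * 3;           /* one header per page table */
	ndw += nptes * 2;                              /* two dwords per PTE */
	ndw += (npdes >> 11) * 3;                      /* headers for the PDE writes */
	ndw += npdes * 2;                              /* two dwords per PDE */

	printf("nptes=%u npdes=%u -> ndw=%u\n", nptes, npdes, ndw);
	return 0;
}

This prints nptes=1024 npdes=4 -> ndw=2094, i.e. the estimate is dominated by the two dwords reserved per page table entry.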
@@ -971,22 +1163,14 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 		radeon_fence_note_sync(vm->fence, ridx);
 	}
 
-	/* update page table entries */
-	pe = vm->pd_gpu_addr;
-	pe += radeon_vm_directory_size(rdev);
-	pe += (bo_va->soffset / RADEON_GPU_PAGE_SIZE) * 8;
-
-	radeon_asic_vm_set_page(rdev, pe, addr, nptes,
-				RADEON_GPU_PAGE_SIZE, bo_va->flags);
-
-	/* update page directory entries */
-	addr = pe;
-
-	pe = vm->pd_gpu_addr;
-	pe += ((bo_va->soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE) * 8;
+	r = radeon_vm_update_pdes(rdev, vm, bo_va->soffset, bo_va->eoffset);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
 
-	radeon_asic_vm_set_page(rdev, pe, addr, npdes,
-				RADEON_VM_PTE_COUNT * 8, RADEON_VM_PAGE_VALID);
+	radeon_vm_update_ptes(rdev, vm, bo_va->soffset, bo_va->eoffset,
+			      addr, bo_va->flags);
 
 	radeon_fence_unref(&vm->fence);
 	r = radeon_fence_emit(rdev, &vm->fence, ridx);
@@ -997,6 +1181,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 	radeon_ring_unlock_commit(rdev, ring);
 	radeon_semaphore_free(rdev, &sem, vm->fence);
 	radeon_fence_unref(&vm->last_flush);
+
 	return 0;
 }
 
@@ -1068,7 +1253,6 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 
 	vm->id = 0;
 	vm->fence = NULL;
-	vm->last_pfn = 0;
 	mutex_init(&vm->mutex);
 	INIT_LIST_HEAD(&vm->list);
 	INIT_LIST_HEAD(&vm->va);
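
Finally, both radeon_vm_update_pdes() and radeon_vm_update_ptes() batch physically contiguous entries into a single radeon_asic_vm_set_page() call. Stripped of the driver specifics, that is the run-coalescing loop sketched below; process() is a hypothetical stand-in for the hardware write, and the entry addresses are made up:

#include <stdint.h>
#include <stdio.h>

/* stand-in for radeon_asic_vm_set_page(): write 'count' consecutive
 * 8-byte entries starting at GPU address 'pe' */
static void process(uint64_t pe, unsigned count)
{
	printf("write %u entries at 0x%llx\n", count, (unsigned long long)pe);
}

int main(void)
{
	/* entry addresses to update; note the gap before the last one */
	uint64_t pes[] = { 0x1000, 0x1008, 0x1010, 0x2000 };

	/* last_pe starts at ~0 so the first entry never looks contiguous,
	 * exactly as last_pde/last_pte are initialized in the patch */
	uint64_t last_pe = ~0ull;
	unsigned count = 0;

	for (unsigned i = 0; i < sizeof(pes) / sizeof(pes[0]); ++i) {
		if (last_pe + 8 * count != pes[i]) {
			if (count)
				process(last_pe, count);   /* flush previous run */
			count = 1;
			last_pe = pes[i];
		} else {
			++count;
		}
	}
	if (count)
		process(last_pe, count);                   /* flush the final run */
	return 0;
}

This emits one call for the three contiguous entries at 0x1000 and a second for the lone entry at 0x2000, which is how the patch keeps the number of set_page commands (and the ndw estimate above) proportional to runs rather than to individual entries.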