diff options
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu.h | 8 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 7 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 9 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 50 |
4 files changed, 27 insertions, 47 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 40850afa763f..d4e9272b60e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
@@ -917,8 +917,8 @@ struct amdgpu_ring { | |||
917 | #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 | 917 | #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 |
918 | 918 | ||
919 | struct amdgpu_vm_pt { | 919 | struct amdgpu_vm_pt { |
920 | struct amdgpu_bo *bo; | 920 | struct amdgpu_bo_list_entry entry; |
921 | uint64_t addr; | 921 | uint64_t addr; |
922 | }; | 922 | }; |
923 | 923 | ||
924 | struct amdgpu_vm_id { | 924 | struct amdgpu_vm_id { |
@@ -983,8 +983,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); | |||
983 | void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, | 983 | void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, |
984 | struct list_head *validated, | 984 | struct list_head *validated, |
985 | struct amdgpu_bo_list_entry *entry); | 985 | struct amdgpu_bo_list_entry *entry); |
986 | struct amdgpu_bo_list_entry *amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, | 986 | void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates); |
987 | struct list_head *duplicates); | ||
988 | int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | 987 | int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, |
989 | struct amdgpu_sync *sync); | 988 | struct amdgpu_sync *sync); |
990 | void amdgpu_vm_flush(struct amdgpu_ring *ring, | 989 | void amdgpu_vm_flush(struct amdgpu_ring *ring, |
@@ -1255,7 +1254,6 @@ struct amdgpu_cs_parser { | |||
1255 | struct amdgpu_cs_chunk *chunks; | 1254 | struct amdgpu_cs_chunk *chunks; |
1256 | /* relocations */ | 1255 | /* relocations */ |
1257 | struct amdgpu_bo_list_entry vm_pd; | 1256 | struct amdgpu_bo_list_entry vm_pd; |
1258 | struct amdgpu_bo_list_entry *vm_bos; | ||
1259 | struct list_head validated; | 1257 | struct list_head validated; |
1260 | struct fence *fence; | 1258 | struct fence *fence; |
1261 | 1259 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 3fb21ecd29e0..6ce595ff1aff 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
@@ -396,11 +396,7 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) | |||
396 | if (unlikely(r != 0)) | 396 | if (unlikely(r != 0)) |
397 | goto error_reserve; | 397 | goto error_reserve; |
398 | 398 | ||
399 | p->vm_bos = amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates); | 399 | amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates); |
400 | if (!p->vm_bos) { | ||
401 | r = -ENOMEM; | ||
402 | goto error_validate; | ||
403 | } | ||
404 | 400 | ||
405 | r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated); | 401 | r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated); |
406 | if (r) | 402 | if (r) |
@@ -483,7 +479,6 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo | |||
483 | if (parser->bo_list) | 479 | if (parser->bo_list) |
484 | amdgpu_bo_list_put(parser->bo_list); | 480 | amdgpu_bo_list_put(parser->bo_list); |
485 | 481 | ||
486 | drm_free_large(parser->vm_bos); | ||
487 | for (i = 0; i < parser->nchunks; i++) | 482 | for (i = 0; i < parser->nchunks; i++) |
488 | drm_free_large(parser->chunks[i].kdata); | 483 | drm_free_large(parser->chunks[i].kdata); |
489 | kfree(parser->chunks); | 484 | kfree(parser->chunks); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index ea0fe94e4b54..8c5687e4a6d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | |||
@@ -447,7 +447,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, | |||
447 | struct amdgpu_bo_va *bo_va, uint32_t operation) | 447 | struct amdgpu_bo_va *bo_va, uint32_t operation) |
448 | { | 448 | { |
449 | struct ttm_validate_buffer tv, *entry; | 449 | struct ttm_validate_buffer tv, *entry; |
450 | struct amdgpu_bo_list_entry *vm_bos; | ||
451 | struct amdgpu_bo_list_entry vm_pd; | 450 | struct amdgpu_bo_list_entry vm_pd; |
452 | struct ww_acquire_ctx ticket; | 451 | struct ww_acquire_ctx ticket; |
453 | struct list_head list, duplicates; | 452 | struct list_head list, duplicates; |
@@ -468,12 +467,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, | |||
468 | if (r) | 467 | if (r) |
469 | goto error_print; | 468 | goto error_print; |
470 | 469 | ||
471 | vm_bos = amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates); | 470 | amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates); |
472 | if (!vm_bos) { | ||
473 | r = -ENOMEM; | ||
474 | goto error_unreserve; | ||
475 | } | ||
476 | |||
477 | list_for_each_entry(entry, &list, head) { | 471 | list_for_each_entry(entry, &list, head) { |
478 | domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type); | 472 | domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type); |
479 | /* if anything is swapped out don't swap it in here, | 473 | /* if anything is swapped out don't swap it in here, |
@@ -494,7 +488,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, | |||
494 | 488 | ||
495 | error_unreserve: | 489 | error_unreserve: |
496 | ttm_eu_backoff_reservation(&ticket, &list); | 490 | ttm_eu_backoff_reservation(&ticket, &list); |
497 | drm_free_large(vm_bos); | ||
498 | 491 | ||
499 | error_print: | 492 | error_print: |
500 | if (r && r != -ERESTARTSYS) | 493 | if (r && r != -ERESTARTSYS) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 592be6438a6c..e0fa9d9ff5c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
@@ -98,40 +98,27 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, | |||
98 | } | 98 | } |
99 | 99 | ||
100 | /** | 100 | /** |
101 | * amdgpu_vm_get_bos - add the vm BOs to a validation list | 101 | * amdgpu_vm_get_pt_bos - add the vm BOs to a duplicates list |
102 | * | 102 | * |
103 | * @vm: vm providing the BOs | 103 | * @vm: vm providing the BOs |
104 | * @duplicates: head of duplicates list | 104 | * @duplicates: head of duplicates list |
105 | * | 105 | * |
106 | * Add the page directory to the list of BOs to | 106 | * Add the page directory to the BO duplicates list |
107 | * validate for command submission (cayman+). | 107 | * for command submission. |
108 | */ | 108 | */ |
109 | struct amdgpu_bo_list_entry *amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, | 109 | void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates) |
110 | struct list_head *duplicates) | ||
111 | { | 110 | { |
112 | struct amdgpu_bo_list_entry *list; | 111 | unsigned i; |
113 | unsigned i, idx; | ||
114 | |||
115 | list = drm_malloc_ab(vm->max_pde_used + 1, | ||
116 | sizeof(struct amdgpu_bo_list_entry)); | ||
117 | if (!list) | ||
118 | return NULL; | ||
119 | 112 | ||
120 | /* add the vm page table to the list */ | 113 | /* add the vm page table to the list */ |
121 | for (i = 0, idx = 0; i <= vm->max_pde_used; i++) { | 114 | for (i = 0; i <= vm->max_pde_used; ++i) { |
122 | if (!vm->page_tables[i].bo) | 115 | struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry; |
116 | |||
117 | if (!entry->robj) | ||
123 | continue; | 118 | continue; |
124 | 119 | ||
125 | list[idx].robj = vm->page_tables[i].bo; | 120 | list_add(&entry->tv.head, duplicates); |
126 | list[idx].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM; | ||
127 | list[idx].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM; | ||
128 | list[idx].priority = 0; | ||
129 | list[idx].tv.bo = &list[idx].robj->tbo; | ||
130 | list[idx].tv.shared = true; | ||
131 | list_add(&list[idx++].tv.head, duplicates); | ||
132 | } | 121 | } |
133 | |||
134 | return list; | ||
135 | } | 122 | } |
136 | 123 | ||
137 | /** | 124 | /** |
@@ -474,7 +461,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | |||
474 | 461 | ||
475 | /* walk over the address space and update the page directory */ | 462 | /* walk over the address space and update the page directory */ |
476 | for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { | 463 | for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { |
477 | struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo; | 464 | struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj; |
478 | uint64_t pde, pt; | 465 | uint64_t pde, pt; |
479 | 466 | ||
480 | if (bo == NULL) | 467 | if (bo == NULL) |
@@ -651,7 +638,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev, | |||
651 | /* walk over the address space and update the page tables */ | 638 | /* walk over the address space and update the page tables */ |
652 | for (addr = start; addr < end; ) { | 639 | for (addr = start; addr < end; ) { |
653 | uint64_t pt_idx = addr >> amdgpu_vm_block_size; | 640 | uint64_t pt_idx = addr >> amdgpu_vm_block_size; |
654 | struct amdgpu_bo *pt = vm->page_tables[pt_idx].bo; | 641 | struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj; |
655 | unsigned nptes; | 642 | unsigned nptes; |
656 | uint64_t pte; | 643 | uint64_t pte; |
657 | int r; | 644 | int r; |
@@ -1083,9 +1070,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |||
1083 | /* walk over the address space and allocate the page tables */ | 1070 | /* walk over the address space and allocate the page tables */ |
1084 | for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { | 1071 | for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { |
1085 | struct reservation_object *resv = vm->page_directory->tbo.resv; | 1072 | struct reservation_object *resv = vm->page_directory->tbo.resv; |
1073 | struct amdgpu_bo_list_entry *entry; | ||
1086 | struct amdgpu_bo *pt; | 1074 | struct amdgpu_bo *pt; |
1087 | 1075 | ||
1088 | if (vm->page_tables[pt_idx].bo) | 1076 | entry = &vm->page_tables[pt_idx].entry; |
1077 | if (entry->robj) | ||
1089 | continue; | 1078 | continue; |
1090 | 1079 | ||
1091 | r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, | 1080 | r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, |
@@ -1102,8 +1091,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |||
1102 | goto error_free; | 1091 | goto error_free; |
1103 | } | 1092 | } |
1104 | 1093 | ||
1094 | entry->robj = pt; | ||
1095 | entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM; | ||
1096 | entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM; | ||
1097 | entry->priority = 0; | ||
1098 | entry->tv.bo = &entry->robj->tbo; | ||
1099 | entry->tv.shared = true; | ||
1105 | vm->page_tables[pt_idx].addr = 0; | 1100 | vm->page_tables[pt_idx].addr = 0; |
1106 | vm->page_tables[pt_idx].bo = pt; | ||
1107 | } | 1101 | } |
1108 | 1102 | ||
1109 | return 0; | 1103 | return 0; |
@@ -1334,7 +1328,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |||
1334 | } | 1328 | } |
1335 | 1329 | ||
1336 | for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) | 1330 | for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) |
1337 | amdgpu_bo_unref(&vm->page_tables[i].bo); | 1331 | amdgpu_bo_unref(&vm->page_tables[i].entry.robj); |
1338 | kfree(vm->page_tables); | 1332 | kfree(vm->page_tables); |
1339 | 1333 | ||
1340 | amdgpu_bo_unref(&vm->page_directory); | 1334 | amdgpu_bo_unref(&vm->page_directory); |