-rw-r--r--  drivers/gpu/drm/radeon/radeon.h         4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c     28
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c  107
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sa.c     20
4 files changed, 43 insertions, 116 deletions
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 617ca45734de..d6cd1f57fece 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -301,7 +301,6 @@ struct radeon_bo_va {
 	uint64_t			soffset;
 	uint64_t			eoffset;
 	uint32_t			flags;
-	struct radeon_fence		*fence;
 	bool				valid;
 };
 
@@ -1832,8 +1831,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev);
 void radeon_vm_manager_fini(struct radeon_device *rdev);
 int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
 void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
-int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm);
-void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
+int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
 struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
 				       struct radeon_vm *vm, int ring);
 void radeon_vm_fence(struct radeon_device *rdev,
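
The radeon.h hunks above capture the whole design shift: the per-mapping fence (radeon_bo_va::fence) goes away, and the bind/unbind pair collapses into a single radeon_vm_alloc_pt(). Below is a minimal user-space model of that ownership change, offered as a sketch only; every name in it is invented and none of it is radeon code. The point it illustrates: the VM keeps one last-use fence, and freeing the page table tags the memory with that fence instead of blocking until it signals.

#include <stdio.h>

struct fence { unsigned seq; };            /* stand-in for radeon_fence */

struct mapping {                           /* was radeon_bo_va: no fence */
	unsigned long soffset, eoffset;
};

struct vm {                                /* the vm keeps the only fence */
	struct fence *last_use;
};

/* freeing the page table no longer waits; it just tags the memory with
 * the VM's last-use fence so the allocator can recycle it safely later */
static void free_pt(struct vm *vm)
{
	printf("pt block reusable after fence %u\n",
	       vm->last_use ? vm->last_use->seq : 0);
}

int main(void)
{
	struct fence f = { .seq = 42 };
	struct vm vm = { .last_use = &f };
	free_pt(&vm);
	return 0;
}
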
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index dc4554e0a711..300fc25d9003 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -286,30 +286,6 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 	return 0;
 }
 
-static void radeon_bo_vm_fence_va(struct radeon_cs_parser *parser,
-				  struct radeon_fence *fence)
-{
-	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
-	struct radeon_vm *vm = &fpriv->vm;
-	struct radeon_bo_list *lobj;
-
-	if (parser->chunk_ib_idx == -1) {
-		return;
-	}
-	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) {
-		return;
-	}
-
-	list_for_each_entry(lobj, &parser->validated, tv.head) {
-		struct radeon_bo_va *bo_va;
-		struct radeon_bo *rbo = lobj->bo;
-
-		bo_va = radeon_bo_va(rbo, vm);
-		radeon_fence_unref(&bo_va->fence);
-		bo_va->fence = radeon_fence_ref(fence);
-	}
-}
-
 /**
  * cs_parser_fini() - clean parser states
  * @parser:	parser structure holding parsing context.
@@ -323,8 +299,6 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 	unsigned i;
 
 	if (!error) {
-		/* fence all bo va before ttm_eu_fence_buffer_objects so bo are still reserved */
-		radeon_bo_vm_fence_va(parser, parser->ib.fence);
 		ttm_eu_fence_buffer_objects(&parser->validated,
 					    parser->ib.fence);
 	} else {
@@ -475,7 +449,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 
 	mutex_lock(&rdev->vm_manager.lock);
 	mutex_lock(&vm->mutex);
-	r = radeon_vm_bind(rdev, vm);
+	r = radeon_vm_alloc_pt(rdev, vm);
 	if (r) {
 		goto out;
 	}
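
With radeon_bo_vm_fence_va() gone, cs_parser_fini() depends entirely on the one list-wide fencing pass that ttm_eu_fence_buffer_objects() already performs. A hedged, stand-alone model of that single pass follows; all types and names below are invented, and only the one-pass idea comes from the diff.

#include <stddef.h>

struct fence  { unsigned seq; };
struct buffer { struct fence *last_use; };

/* one pass over the validated list, the way TTM's execbuf helper
 * fences buffers for the driver; no second per-mapping walk needed */
static void fence_validated(struct buffer *list, size_t n, struct fence *f)
{
	for (size_t i = 0; i < n; ++i)
		list[i].last_use = f;
}

int main(void)
{
	struct fence ib_fence = { .seq = 1 };
	struct buffer validated[2] = {{0}, {0}};

	fence_validated(validated, 2, &ib_fence);
	return 0;
}
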
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 0fd0ba9236a6..da1b2dbe4550 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -478,43 +478,26 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
 	return 0;
 }
 
-/* global mutex must be lock */
 /**
- * radeon_vm_unbind_locked - unbind a specific vm
+ * radeon_vm_free_pt - free the page table for a specific vm
  *
  * @rdev: radeon_device pointer
  * @vm: vm to unbind
  *
- * Unbind the requested vm (cayman+).
- * Wait for use of the VM to finish, then unbind the page table,
- * and free the page table memory.
+ * Free the page table of a specific vm (cayman+).
+ *
+ * Global and local mutex must be locked!
  */
-static void radeon_vm_unbind_locked(struct radeon_device *rdev,
-				    struct radeon_vm *vm)
+static void radeon_vm_free_pt(struct radeon_device *rdev,
+			      struct radeon_vm *vm)
 {
 	struct radeon_bo_va *bo_va;
 
-	/* wait for vm use to end */
-	while (vm->fence) {
-		int r;
-		r = radeon_fence_wait(vm->fence, false);
-		if (r)
-			DRM_ERROR("error while waiting for fence: %d\n", r);
-		if (r == -EDEADLK) {
-			mutex_unlock(&rdev->vm_manager.lock);
-			r = radeon_gpu_reset(rdev);
-			mutex_lock(&rdev->vm_manager.lock);
-			if (!r)
-				continue;
-		}
-		break;
-	}
-	radeon_fence_unref(&vm->fence);
-	radeon_fence_unref(&vm->last_flush);
+	if (!vm->sa_bo)
+		return;
 
-	/* hw unbind */
 	list_del_init(&vm->list);
-	radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
+	radeon_sa_bo_free(rdev, &vm->sa_bo, vm->fence);
 	vm->pt = NULL;
 
 	list_for_each_entry(bo_va, &vm->va, vm_list) {
@@ -538,9 +521,11 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
 		return;
 
 	mutex_lock(&rdev->vm_manager.lock);
-	/* unbind all active vm */
+	/* free all allocated page tables */
 	list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
-		radeon_vm_unbind_locked(rdev, vm);
+		mutex_lock(&vm->mutex);
+		radeon_vm_free_pt(rdev, vm);
+		mutex_unlock(&vm->mutex);
 	}
 	for (i = 0; i < RADEON_NUM_VM; ++i) {
 		radeon_fence_unref(&rdev->vm_manager.active[i]);
@@ -553,36 +538,19 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
 	rdev->vm_manager.enabled = false;
 }
 
-/* global mutex must be locked */
-/**
- * radeon_vm_unbind - locked version of unbind
- *
- * @rdev: radeon_device pointer
- * @vm: vm to unbind
- *
- * Locked version that wraps radeon_vm_unbind_locked (cayman+).
- */
-void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
-{
-	mutex_lock(&vm->mutex);
-	radeon_vm_unbind_locked(rdev, vm);
-	mutex_unlock(&vm->mutex);
-}
-
-/* global and local mutex must be locked */
 /**
- * radeon_vm_bind - bind a page table to a VMID
+ * radeon_vm_alloc_pt - allocates a page table for a VM
  *
  * @rdev: radeon_device pointer
  * @vm: vm to bind
  *
- * Bind the requested vm (cayman+).
- * Suballocate memory for the page table, allocate a VMID
- * and bind the page table to it, and finally start to populate
- * the page table.
+ * Allocate a page table for the requested vm (cayman+).
+ * Also starts to populate the page table.
  * Returns 0 for success, error for failure.
+ *
+ * Global and local mutex must be locked!
  */
-int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
+int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
 {
 	struct radeon_vm *vm_evict;
 	int r;
@@ -602,14 +570,20 @@ retry:
 	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
 			     RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
 			     RADEON_GPU_PAGE_SIZE, false);
-	if (r) {
+	if (r == -ENOMEM) {
 		if (list_empty(&rdev->vm_manager.lru_vm)) {
 			return r;
 		}
 		vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
-		radeon_vm_unbind(rdev, vm_evict);
+		mutex_lock(&vm_evict->mutex);
+		radeon_vm_free_pt(rdev, vm_evict);
+		mutex_unlock(&vm_evict->mutex);
 		goto retry;
+
+	} else if (r) {
+		return r;
 	}
+
 	vm->pt = radeon_sa_bo_cpu_addr(vm->sa_bo);
 	vm->pt_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
 	memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));
@@ -758,7 +732,7 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
 	if (last_pfn > vm->last_pfn) {
 		/* grow va space 32M by 32M */
 		unsigned align = ((32 << 20) >> 12) - 1;
-		radeon_vm_unbind_locked(rdev, vm);
+		radeon_vm_free_pt(rdev, vm);
 		vm->last_pfn = (last_pfn + align) & ~align;
 	}
 	mutex_unlock(&rdev->vm_manager.lock);
@@ -886,7 +860,6 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 	return 0;
 }
 
-/* object have to be reserved */
 /**
  * radeon_vm_bo_rmv - remove a bo to a specific vm
  *
@@ -898,36 +871,22 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
  * Remove @bo from the list of bos associated with the vm and
  * remove the ptes for @bo in the page table.
  * Returns 0 for success.
+ *
+ * Object has to be reserved!
  */
 int radeon_vm_bo_rmv(struct radeon_device *rdev,
 		     struct radeon_vm *vm,
 		     struct radeon_bo *bo)
 {
 	struct radeon_bo_va *bo_va;
-	int r;
 
 	bo_va = radeon_bo_va(bo, vm);
 	if (bo_va == NULL)
 		return 0;
 
-	/* wait for va use to end */
-	while (bo_va->fence) {
-		r = radeon_fence_wait(bo_va->fence, false);
-		if (r) {
-			DRM_ERROR("error while waiting for fence: %d\n", r);
-		}
-		if (r == -EDEADLK) {
-			r = radeon_gpu_reset(rdev);
-			if (!r)
-				continue;
-		}
-		break;
-	}
-	radeon_fence_unref(&bo_va->fence);
-
 	mutex_lock(&rdev->vm_manager.lock);
 	mutex_lock(&vm->mutex);
-	radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
+	radeon_vm_free_pt(rdev, vm);
 	mutex_unlock(&rdev->vm_manager.lock);
 	list_del(&bo_va->vm_list);
 	mutex_unlock(&vm->mutex);
@@ -1010,7 +969,7 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 
 	mutex_lock(&rdev->vm_manager.lock);
 	mutex_lock(&vm->mutex);
-	radeon_vm_unbind_locked(rdev, vm);
+	radeon_vm_free_pt(rdev, vm);
 	mutex_unlock(&rdev->vm_manager.lock);
 
 	/* remove all bo at this point non are busy any more because unbind
@@ -1021,7 +980,6 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 		bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm);
 		list_del_init(&bo_va->bo_list);
 		list_del_init(&bo_va->vm_list);
-		radeon_fence_unref(&bo_va->fence);
 		radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
 		kfree(bo_va);
 	}
@@ -1033,10 +991,11 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 		r = radeon_bo_reserve(bo_va->bo, false);
 		if (!r) {
 			list_del_init(&bo_va->bo_list);
-			radeon_fence_unref(&bo_va->fence);
 			radeon_bo_unreserve(bo_va->bo);
 			kfree(bo_va);
 		}
 	}
+	radeon_fence_unref(&vm->fence);
+	radeon_fence_unref(&vm->last_flush);
 	mutex_unlock(&vm->mutex);
 }
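
The heart of the radeon_gart.c change is the allocate-or-evict loop in radeon_vm_alloc_pt(): retry the suballocation after evicting the least recently used VM's page table, but only when the failure was -ENOMEM; any other error propagates. Below is a compilable sketch of just that pattern; sub_alloc(), lru_empty() and evict_lru() are hypothetical stand-ins for the suballocator and the LRU list, not driver functions.

#include <errno.h>
#include <stddef.h>

/* hypothetical stand-ins: one unit of pool space, one evictable VM */
static int pool_free = 0;
static int lru_count = 1;

static int sub_alloc(size_t size)
{
	(void)size;
	return pool_free-- > 0 ? 0 : -ENOMEM;
}

static int lru_empty(void) { return lru_count == 0; }

static void evict_lru(void)            /* frees the victim's page table */
{
	lru_count--;
	pool_free = 1;
}

static int alloc_pt(size_t size)
{
	int r;

retry:
	r = sub_alloc(size);
	if (r == -ENOMEM) {
		if (lru_empty())
			return r;      /* nothing left to evict */
		evict_lru();           /* lock victim, free pt, unlock */
		goto retry;
	} else if (r) {
		return r;              /* real error: don't retry */
	}
	return 0;
}

int main(void) { return alloc_pt(64); }
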
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 4e771240fdd0..105fde69d045 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -316,7 +316,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
 {
 	struct radeon_fence *fences[RADEON_NUM_RINGS];
 	unsigned tries[RADEON_NUM_RINGS];
-	int i, r = -ENOMEM;
+	int i, r;
 
 	BUG_ON(align > RADEON_GPU_PAGE_SIZE);
 	BUG_ON(size > sa_manager->size);
@@ -331,7 +331,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
 	INIT_LIST_HEAD(&(*sa_bo)->flist);
 
 	spin_lock(&sa_manager->wq.lock);
-	while(1) {
+	do {
 		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 			fences[i] = NULL;
 			tries[i] = 0;
@@ -349,26 +349,22 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
 			/* see if we can skip over some allocations */
 		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
 
-		if (!block) {
-			break;
-		}
-
 		spin_unlock(&sa_manager->wq.lock);
 		r = radeon_fence_wait_any(rdev, fences, false);
 		spin_lock(&sa_manager->wq.lock);
 		/* if we have nothing to wait for block */
-		if (r == -ENOENT) {
+		if (r == -ENOENT && block) {
 			r = wait_event_interruptible_locked(
 				sa_manager->wq,
 				radeon_sa_event(sa_manager, size, align)
 			);
+
+		} else if (r == -ENOENT) {
+			r = -ENOMEM;
 		}
-		if (r) {
-			goto out_err;
-		}
-	};
 
-out_err:
+	} while (!r);
+
 	spin_unlock(&sa_manager->wq.lock);
 	kfree(*sa_bo);
 	*sa_bo = NULL;
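
Finally, the radeon_sa.c rework folds the open-coded loop and its out_err label into one do/while: try to allocate, and when there is no fence left to wait on, either sleep on the wait queue (block == true) or fail with -ENOMEM. The model below reproduces only that control flow; try_alloc(), wait_for_fences() and wait_for_space() are invented stubs, not the real fence and wait-queue machinery.

#include <errno.h>
#include <stdbool.h>

static int attempts = 2;                 /* succeed on the second try */

static int try_alloc(void) { return --attempts <= 0 ? 0 : -ENOSPC; }

/* -ENOENT means no fence is pending, i.e. nothing will free up space */
static int wait_for_fences(void) { return -ENOENT; }

static int wait_for_space(void) { return 0; }  /* sleeps in the driver */

static int sa_new(bool block)
{
	int r;

	do {
		if (try_alloc() == 0)
			return 0;

		r = wait_for_fences();
		if (r == -ENOENT && block)
			r = wait_for_space();   /* nothing pending: sleep */
		else if (r == -ENOENT)
			r = -ENOMEM;            /* caller said don't block */
	} while (!r);

	return r;
}

int main(void)
{
	return sa_new(true);   /* returns 0: second attempt succeeds */
}
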