commit c48c43e422c1404fd72c57d1d21a6f6d01e18900
tree   48e5d3828b4f5479361986535f71a1ae44e4f3c1
parent 520045db940a381d2bee1c1b2179f7921b40fb10
parent 135cba0dc399fdd47bd3ae305c1db75fcd77243f
author    Linus Torvalds <torvalds@linux-foundation.org>  2010-10-26 21:57:59 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-10-26 21:57:59 -0400
Merge branch 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (476 commits)
  vmwgfx: Implement a proper GMR eviction mechanism
  drm/radeon/kms: fix r6xx/7xx 1D tiling CS checker v2
  drm/radeon/kms: properly compute group_size on 6xx/7xx
  drm/radeon/kms: fix 2D tile height alignment in the r600 CS checker
  drm/radeon/kms/evergreen: set the clear state to the blit state
  drm/radeon/kms: don't poll dac load detect.
  gpu: Add Intel GMA500(Poulsbo) Stub Driver
  drm/radeon/kms: MC vram map needs to be >= pci aperture size
  drm/radeon/kms: implement display watermark support for evergreen
  drm/radeon/kms/evergreen: add some additional safe regs v2
  drm/radeon/r600: fix tiling issues in CS checker.
  drm/i915: Move gpu_write_list to per-ring
  drm/i915: Invalidate the to-ring, flush the old-ring when updating domains
  drm/i915/ringbuffer: Write the value passed in to the tail register
  agp/intel: Restore valid PTE bit for Sandybridge after bdd3072
  drm/i915: Fix flushing regression from 9af90d19f
  drm/i915/sdvo: Remove unused encoding member
  i915: enable AVI infoframe for intel_hdmi.c [v4]
  drm/i915: Fix current fb blocking for page flip
  drm/i915: IS_IRONLAKE is synonymous with gen == 5
  ...

Fix up conflicts in
 - drivers/gpu/drm/i915/{i915_gem.c, i915/intel_overlay.c}: due to the new
   simplified stack-based kmap_atomic() interface
 - drivers/gpu/drm/vmwgfx/vmwgfx_drv.c: added .llseek entry due to BKL
   removal cleanups.
Diffstat (limited to 'drivers/gpu/drm/ttm')
 -rw-r--r--  drivers/gpu/drm/ttm/Makefile            3
 -rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c   3
 -rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c          305
 -rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_manager.c  148
 -rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c      12
 5 files changed, 293 insertions(+), 178 deletions(-)
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b256d4adfaf..f3cf6f02c99 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,6 +4,7 @@
 ccflags-y := -Iinclude/drm
 ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
 	ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
-	ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o
+	ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
+	ttm_bo_manager.o
 
 obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 4bf69c40449..f999e36f30b 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -74,6 +74,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
 {
 	struct ttm_agp_backend *agp_be =
 		container_of(backend, struct ttm_agp_backend, backend);
+	struct drm_mm_node *node = bo_mem->mm_node;
 	struct agp_memory *mem = agp_be->mem;
 	int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
 	int ret;
@@ -81,7 +82,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
 	mem->is_flushed = 1;
 	mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
 
-	ret = agp_bind_memory(mem, bo_mem->mm_node->start);
+	ret = agp_bind_memory(mem, node->start);
 	if (ret)
 		printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n");
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index db809e034cc..a1cb783c713 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -84,11 +84,8 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
 		man->available_caching);
 	printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
 		man->default_caching);
-	if (mem_type != TTM_PL_SYSTEM) {
-		spin_lock(&bdev->glob->lru_lock);
-		drm_mm_debug_table(&man->manager, TTM_PFX);
-		spin_unlock(&bdev->glob->lru_lock);
-	}
+	if (mem_type != TTM_PL_SYSTEM)
+		(*man->func->debug)(man, TTM_PFX);
 }
 
 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
@@ -169,18 +166,13 @@ static void ttm_bo_release_list(struct kref *list_kref)
 
 int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
 {
-
 	if (interruptible) {
-		int ret = 0;
-
-		ret = wait_event_interruptible(bo->event_queue,
+		return wait_event_interruptible(bo->event_queue,
 					atomic_read(&bo->reserved) == 0);
-		if (unlikely(ret != 0))
-			return ret;
 	} else {
 		wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
+		return 0;
 	}
-	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_wait_unreserved);
 
@@ -421,7 +413,7 @@ moved:
 
 	if (bo->mem.mm_node) {
 		spin_lock(&bo->lock);
-		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
+		bo->offset = (bo->mem.start << PAGE_SHIFT) +
 		    bdev->man[bo->mem.mem_type].gpu_offset;
 		bo->cur_placement = bo->mem.placement;
 		spin_unlock(&bo->lock);
@@ -442,135 +434,144 @@ out_err:
 }
 
 /**
- * Call bo::reserved and with the lru lock held.
+ * Call bo::reserved.
  * Will release GPU memory type usage on destruction.
- * This is the place to put in driver specific hooks.
- * Will release the bo::reserved lock and the
- * lru lock on exit.
+ * This is the place to put in driver specific hooks to release
+ * driver private resources.
+ * Will release the bo::reserved lock.
  */
 
 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 {
-	struct ttm_bo_global *glob = bo->glob;
-
 	if (bo->ttm) {
-
-		/**
-		 * Release the lru_lock, since we don't want to have
-		 * an atomic requirement on ttm_tt[unbind|destroy].
-		 */
-
-		spin_unlock(&glob->lru_lock);
 		ttm_tt_unbind(bo->ttm);
 		ttm_tt_destroy(bo->ttm);
 		bo->ttm = NULL;
-		spin_lock(&glob->lru_lock);
 	}
 
-	if (bo->mem.mm_node) {
-		drm_mm_put_block(bo->mem.mm_node);
-		bo->mem.mm_node = NULL;
-	}
+	ttm_bo_mem_put(bo, &bo->mem);
 
 	atomic_set(&bo->reserved, 0);
 	wake_up_all(&bo->event_queue);
-	spin_unlock(&glob->lru_lock);
 }
 
-
-/**
- * If bo idle, remove from delayed- and lru lists, and unref.
- * If not idle, and already on delayed list, do nothing.
- * If not idle, and not on delayed list, put on delayed list,
- * up the list_kref and schedule a delayed list check.
- */
-
-static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
+static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
-	struct ttm_bo_driver *driver = bdev->driver;
+	struct ttm_bo_driver *driver;
+	void *sync_obj;
+	void *sync_obj_arg;
+	int put_count;
 	int ret;
 
 	spin_lock(&bo->lock);
-retry:
-	(void) ttm_bo_wait(bo, false, false, !remove_all);
-
+	(void) ttm_bo_wait(bo, false, false, true);
 	if (!bo->sync_obj) {
-		int put_count;
-
-		spin_unlock(&bo->lock);
 
 		spin_lock(&glob->lru_lock);
-		ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);
 
 		/**
-		 * Someone else has the object reserved. Bail and retry.
+		 * Lock inversion between bo::reserve and bo::lock here,
+		 * but that's OK, since we're only trylocking.
 		 */
 
-		if (unlikely(ret == -EBUSY)) {
-			spin_unlock(&glob->lru_lock);
-			spin_lock(&bo->lock);
-			goto requeue;
-		}
-
-		/**
-		 * We can re-check for sync object without taking
-		 * the bo::lock since setting the sync object requires
-		 * also bo::reserved. A busy object at this point may
-		 * be caused by another thread starting an accelerated
-		 * eviction.
-		 */
+		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
 
-		if (unlikely(bo->sync_obj)) {
-			atomic_set(&bo->reserved, 0);
-			wake_up_all(&bo->event_queue);
-			spin_unlock(&glob->lru_lock);
-			spin_lock(&bo->lock);
-			if (remove_all)
-				goto retry;
-			else
-				goto requeue;
-		}
+		if (unlikely(ret == -EBUSY))
+			goto queue;
 
+		spin_unlock(&bo->lock);
 		put_count = ttm_bo_del_from_lru(bo);
 
-		if (!list_empty(&bo->ddestroy)) {
-			list_del_init(&bo->ddestroy);
-			++put_count;
-		}
-
+		spin_unlock(&glob->lru_lock);
 		ttm_bo_cleanup_memtype_use(bo);
 
 		while (put_count--)
 			kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
-		return 0;
+		return;
+	} else {
+		spin_lock(&glob->lru_lock);
 	}
-requeue:
+queue:
+	sync_obj = bo->sync_obj;
+	sync_obj_arg = bo->sync_obj_arg;
+	driver = bdev->driver;
+
+	kref_get(&bo->list_kref);
+	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+	spin_unlock(&glob->lru_lock);
+	spin_unlock(&bo->lock);
+
+	if (sync_obj)
+		driver->sync_obj_flush(sync_obj, sync_obj_arg);
+	schedule_delayed_work(&bdev->wq,
+			      ((HZ / 100) < 1) ? 1 : HZ / 100);
+}
+
+/**
+ * function ttm_bo_cleanup_refs
+ * If bo idle, remove from delayed- and lru lists, and unref.
+ * If not idle, do nothing.
+ *
+ * @interruptible         Any sleeps should occur interruptibly.
+ * @no_wait_reserve       Never wait for reserve. Return -EBUSY instead.
+ * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
+ */
+
+static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
+			       bool interruptible,
+			       bool no_wait_reserve,
+			       bool no_wait_gpu)
+{
+	struct ttm_bo_global *glob = bo->glob;
+	int put_count;
+	int ret = 0;
+
+retry:
+	spin_lock(&bo->lock);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
+	spin_unlock(&bo->lock);
+
+	if (unlikely(ret != 0))
+		return ret;
+
 	spin_lock(&glob->lru_lock);
-	if (list_empty(&bo->ddestroy)) {
-		void *sync_obj = bo->sync_obj;
-		void *sync_obj_arg = bo->sync_obj_arg;
+	ret = ttm_bo_reserve_locked(bo, interruptible,
+				    no_wait_reserve, false, 0);
 
-		kref_get(&bo->list_kref);
-		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+	if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) {
 		spin_unlock(&glob->lru_lock);
-		spin_unlock(&bo->lock);
+		return ret;
+	}
 
-		if (sync_obj)
-			driver->sync_obj_flush(sync_obj, sync_obj_arg);
-		schedule_delayed_work(&bdev->wq,
-				((HZ / 100) < 1) ? 1 : HZ / 100);
-		ret = 0;
+	/**
+	 * We can re-check for sync object without taking
+	 * the bo::lock since setting the sync object requires
+	 * also bo::reserved. A busy object at this point may
+	 * be caused by another thread recently starting an accelerated
+	 * eviction.
+	 */
 
-	} else {
+	if (unlikely(bo->sync_obj)) {
+		atomic_set(&bo->reserved, 0);
+		wake_up_all(&bo->event_queue);
 		spin_unlock(&glob->lru_lock);
-		spin_unlock(&bo->lock);
-		ret = -EBUSY;
+		goto retry;
 	}
 
-	return ret;
+	put_count = ttm_bo_del_from_lru(bo);
+	list_del_init(&bo->ddestroy);
+	++put_count;
+
+	spin_unlock(&glob->lru_lock);
+	ttm_bo_cleanup_memtype_use(bo);
+
+	while (put_count--)
+		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+
+	return 0;
 }
 
 /**
@@ -602,7 +603,8 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 		}
 
 		spin_unlock(&glob->lru_lock);
-		ret = ttm_bo_cleanup_refs(entry, remove_all);
+		ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
+					  !remove_all);
 		kref_put(&entry->list_kref, ttm_bo_release_list);
 		entry = nentry;
 
@@ -645,7 +647,7 @@ static void ttm_bo_release(struct kref *kref)
 		bo->vm_node = NULL;
 	}
 	write_unlock(&bdev->vm_lock);
-	ttm_bo_cleanup_refs(bo, false);
+	ttm_bo_cleanup_refs_or_queue(bo);
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 	write_lock(&bdev->vm_lock);
 }
@@ -680,7 +682,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 			bool no_wait_reserve, bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_global *glob = bo->glob;
 	struct ttm_mem_reg evict_mem;
 	struct ttm_placement placement;
 	int ret = 0;
@@ -726,12 +727,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	if (ret) {
 		if (ret != -ERESTARTSYS)
 			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
-		spin_lock(&glob->lru_lock);
-		if (evict_mem.mm_node) {
-			drm_mm_put_block(evict_mem.mm_node);
-			evict_mem.mm_node = NULL;
-		}
-		spin_unlock(&glob->lru_lock);
+		ttm_bo_mem_put(bo, &evict_mem);
 		goto out;
 	}
 	bo->evicted = true;
@@ -759,6 +755,18 @@ retry:
 	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
 	kref_get(&bo->list_kref);
 
+	if (!list_empty(&bo->ddestroy)) {
+		spin_unlock(&glob->lru_lock);
+		ret = ttm_bo_cleanup_refs(bo, interruptible,
+					  no_wait_reserve, no_wait_gpu);
+		kref_put(&bo->list_kref, ttm_bo_release_list);
+
+		if (likely(ret == 0 || ret == -ERESTARTSYS))
+			return ret;
+
+		goto retry;
+	}
+
 	ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
 
 	if (unlikely(ret == -EBUSY)) {
@@ -792,41 +800,14 @@ retry:
 	return ret;
 }
 
-static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
-				struct ttm_mem_type_manager *man,
-				struct ttm_placement *placement,
-				struct ttm_mem_reg *mem,
-				struct drm_mm_node **node)
+void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
 {
-	struct ttm_bo_global *glob = bo->glob;
-	unsigned long lpfn;
-	int ret;
+	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
 
-	lpfn = placement->lpfn;
-	if (!lpfn)
-		lpfn = man->size;
-	*node = NULL;
-	do {
-		ret = drm_mm_pre_get(&man->manager);
-		if (unlikely(ret))
-			return ret;
-
-		spin_lock(&glob->lru_lock);
-		*node = drm_mm_search_free_in_range(&man->manager,
-					mem->num_pages, mem->page_alignment,
-					placement->fpfn, lpfn, 1);
-		if (unlikely(*node == NULL)) {
-			spin_unlock(&glob->lru_lock);
-			return 0;
-		}
-		*node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
-							mem->page_alignment,
-							placement->fpfn,
-							lpfn);
-		spin_unlock(&glob->lru_lock);
-	} while (*node == NULL);
-	return 0;
+	if (mem->mm_node)
+		(*man->func->put_node)(man, mem);
 }
+EXPORT_SYMBOL(ttm_bo_mem_put);
 
 /**
  * Repeatedly evict memory from the LRU for @mem_type until we create enough
@@ -843,14 +824,13 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-	struct drm_mm_node *node;
 	int ret;
 
 	do {
-		ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
+		ret = (*man->func->get_node)(man, bo, placement, mem);
 		if (unlikely(ret != 0))
 			return ret;
-		if (node)
+		if (mem->mm_node)
 			break;
 		spin_lock(&glob->lru_lock);
 		if (list_empty(&man->lru)) {
@@ -863,9 +843,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 		if (unlikely(ret != 0))
 			return ret;
 	} while (1);
-	if (node == NULL)
+	if (mem->mm_node == NULL)
 		return -ENOMEM;
-	mem->mm_node = node;
 	mem->mem_type = mem_type;
 	return 0;
 }
@@ -939,7 +918,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	bool type_found = false;
 	bool type_ok = false;
 	bool has_erestartsys = false;
-	struct drm_mm_node *node = NULL;
 	int i, ret;
 
 	mem->mm_node = NULL;
@@ -973,17 +951,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
 		if (man->has_type && man->use_type) {
 			type_found = true;
-			ret = ttm_bo_man_get_node(bo, man, placement, mem,
-						&node);
+			ret = (*man->func->get_node)(man, bo, placement, mem);
 			if (unlikely(ret))
 				return ret;
 		}
-		if (node)
+		if (mem->mm_node)
 			break;
 	}
 
-	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
-		mem->mm_node = node;
+	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
 		mem->mem_type = mem_type;
 		mem->placement = cur_flags;
 		return 0;
@@ -1053,7 +1029,6 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 			bool interruptible, bool no_wait_reserve,
 			bool no_wait_gpu)
 {
-	struct ttm_bo_global *glob = bo->glob;
 	int ret = 0;
 	struct ttm_mem_reg mem;
 
@@ -1081,11 +1056,8 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 		goto out_unlock;
 	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
 out_unlock:
-	if (ret && mem.mm_node) {
-		spin_lock(&glob->lru_lock);
-		drm_mm_put_block(mem.mm_node);
-		spin_unlock(&glob->lru_lock);
-	}
+	if (ret && mem.mm_node)
+		ttm_bo_mem_put(bo, &mem);
 	return ret;
 }
 
@@ -1093,11 +1065,10 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
 			     struct ttm_mem_reg *mem)
 {
 	int i;
-	struct drm_mm_node *node = mem->mm_node;
 
-	if (node && placement->lpfn != 0 &&
-	    (node->start < placement->fpfn ||
-	     node->start + node->size > placement->lpfn))
+	if (mem->mm_node && placement->lpfn != 0 &&
+	    (mem->start < placement->fpfn ||
+	     mem->start + mem->num_pages > placement->lpfn))
 		return -1;
 
 	for (i = 0; i < placement->num_placement; i++) {
@@ -1341,7 +1312,6 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 
 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 {
-	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man;
 	int ret = -EINVAL;
 
@@ -1364,13 +1334,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 	if (mem_type > 0) {
 		ttm_bo_force_list_clean(bdev, mem_type, false);
 
-		spin_lock(&glob->lru_lock);
-		if (drm_mm_clean(&man->manager))
-			drm_mm_takedown(&man->manager);
-		else
-			ret = -EBUSY;
-
-		spin_unlock(&glob->lru_lock);
+		ret = (*man->func->takedown)(man);
 	}
 
 	return ret;
@@ -1421,6 +1385,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 	ret = bdev->driver->init_mem_type(bdev, type, man);
 	if (ret)
 		return ret;
+	man->bdev = bdev;
 
 	ret = 0;
 	if (type != TTM_PL_SYSTEM) {
@@ -1430,7 +1395,8 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 				type);
 			return ret;
 		}
-		ret = drm_mm_init(&man->manager, 0, p_size);
+
+		ret = (*man->func->init)(man, p_size);
 		if (ret)
 			return ret;
 	}
@@ -1824,6 +1790,13 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 					struct ttm_buffer_object, swap);
 		kref_get(&bo->list_kref);
 
+		if (!list_empty(&bo->ddestroy)) {
+			spin_unlock(&glob->lru_lock);
+			(void) ttm_bo_cleanup_refs(bo, false, false, false);
+			kref_put(&bo->list_kref, ttm_bo_release_list);
+			continue;
+		}
+
 		/**
 		 * Reserve buffer. Since we unlock while sleeping, we need
 		 * to re-check that nobody removed us from the swap-list while
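
Throughout the ttm_bo.c hunks above, direct drm_mm calls are replaced by indirect calls through a new per-memory-type function table: (*man->func->init)(), (*man->func->takedown)(), (*man->func->get_node)(), (*man->func->put_node)() and (*man->func->debug)(). For orientation only, the shape of that table can be reconstructed from those call sites and from the static functions in ttm_bo_manager.c below; the authoritative definition is the ttm_mem_type_manager_func struct this series adds to include/drm/ttm/ttm_bo_driver.h, which is not part of this diff.

/* Sketch reconstructed from the call sites in this diff, not copied from the
 * header; see include/drm/ttm/ttm_bo_driver.h for the real definition. */
struct ttm_mem_type_manager_func {
	int  (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
	int  (*takedown)(struct ttm_mem_type_manager *man);
	int  (*get_node)(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem);
	void (*put_node)(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem);
	void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
};
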
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
new file mode 100644
index 00000000000..7410c190c89
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -0,0 +1,148 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "ttm/ttm_module.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/module.h>
+
+static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
+			       struct ttm_buffer_object *bo,
+			       struct ttm_placement *placement,
+			       struct ttm_mem_reg *mem)
+{
+	struct ttm_bo_global *glob = man->bdev->glob;
+	struct drm_mm *mm = man->priv;
+	struct drm_mm_node *node = NULL;
+	unsigned long lpfn;
+	int ret;
+
+	lpfn = placement->lpfn;
+	if (!lpfn)
+		lpfn = man->size;
+	do {
+		ret = drm_mm_pre_get(mm);
+		if (unlikely(ret))
+			return ret;
+
+		spin_lock(&glob->lru_lock);
+		node = drm_mm_search_free_in_range(mm,
+					mem->num_pages, mem->page_alignment,
+					placement->fpfn, lpfn, 1);
+		if (unlikely(node == NULL)) {
+			spin_unlock(&glob->lru_lock);
+			return 0;
+		}
+		node = drm_mm_get_block_atomic_range(node, mem->num_pages,
+						     mem->page_alignment,
+						     placement->fpfn,
+						     lpfn);
+		spin_unlock(&glob->lru_lock);
+	} while (node == NULL);
+
+	mem->mm_node = node;
+	mem->start = node->start;
+	return 0;
+}
+
+static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
+				struct ttm_mem_reg *mem)
+{
+	struct ttm_bo_global *glob = man->bdev->glob;
+
+	if (mem->mm_node) {
+		spin_lock(&glob->lru_lock);
+		drm_mm_put_block(mem->mm_node);
+		spin_unlock(&glob->lru_lock);
+		mem->mm_node = NULL;
+	}
+}
+
+static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
+			   unsigned long p_size)
+{
+	struct drm_mm *mm;
+	int ret;
+
+	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+	if (!mm)
+		return -ENOMEM;
+
+	ret = drm_mm_init(mm, 0, p_size);
+	if (ret) {
+		kfree(mm);
+		return ret;
+	}
+
+	man->priv = mm;
+	return 0;
+}
+
+static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
+{
+	struct ttm_bo_global *glob = man->bdev->glob;
+	struct drm_mm *mm = man->priv;
+	int ret = 0;
+
+	spin_lock(&glob->lru_lock);
+	if (drm_mm_clean(mm)) {
+		drm_mm_takedown(mm);
+		kfree(mm);
+		man->priv = NULL;
+	} else
+		ret = -EBUSY;
+	spin_unlock(&glob->lru_lock);
+	return ret;
+}
+
+static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
+			     const char *prefix)
+{
+	struct ttm_bo_global *glob = man->bdev->glob;
+	struct drm_mm *mm = man->priv;
+
+	spin_lock(&glob->lru_lock);
+	drm_mm_debug_table(mm, prefix);
+	spin_unlock(&glob->lru_lock);
+}
+
+const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
+	ttm_bo_man_init,
+	ttm_bo_man_takedown,
+	ttm_bo_man_get_node,
+	ttm_bo_man_put_node,
+	ttm_bo_man_debug
+};
+EXPORT_SYMBOL(ttm_bo_manager_func);
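
ttm_bo_manager_func is exported so that drivers which are happy with the stock drm_mm-backed range manager can keep the old behaviour by pointing their memory types at it. A minimal, hypothetical hookup is sketched below; foo_init_mem_type and the flag/caching choices are illustrative and not taken from this patch. ttm_bo_init_mm() then calls (*man->func->init)(man, p_size), as seen in the ttm_bo.c hunk above.

/* Hypothetical driver callback, not part of this patch: shows where a driver
 * would select the generic manager after this restructuring. */
static int foo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			     struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;  /* drm_mm-backed manager above */
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
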
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index e8a73e65da6..3106d5bcce3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -39,14 +39,7 @@
 
 void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 {
-	struct ttm_mem_reg *old_mem = &bo->mem;
-
-	if (old_mem->mm_node) {
-		spin_lock(&bo->glob->lru_lock);
-		drm_mm_put_block(old_mem->mm_node);
-		spin_unlock(&bo->glob->lru_lock);
-	}
-	old_mem->mm_node = NULL;
+	ttm_bo_mem_put(bo, &bo->mem);
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
@@ -263,8 +256,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 		dir = 1;
 
 	if ((old_mem->mem_type == new_mem->mem_type) &&
-	    (new_mem->mm_node->start <
-	     old_mem->mm_node->start + old_mem->mm_node->size)) {
+	    (new_mem->start < old_mem->start + old_mem->size)) {
 		dir = -1;
 		add = new_mem->num_pages - 1;
 	}