author    Jerome Glisse <jglisse@redhat.com>    2009-12-08 09:33:32 -0500
committer Dave Airlie <airlied@redhat.com>    2009-12-10 00:09:02 -0500
commit    ca262a9998d46196750bb19a9dc4bd465b170ff7 (patch)
tree      b128691e5c57f6305c5752ac5c1b09e6aedfb650 /drivers/gpu
parent    a2e68e92d384d37c8cc6bb7206d43b1eb9bc3f08 (diff)
drm/ttm: Rework validation & memory space allocation (V3)
This change allows a driver to pass a sorted list of memory placements, from the most preferred to the least preferred placement. To avoid a long function prototype, a structure is used to gather the memory placement information, such as a range restriction (if you need a buffer to sit in a given range). The range restriction is given by fpfn and lpfn, the first and last page numbers between which allocation can happen; if those fields are set to 0, ttm assumes the buffer can be placed anywhere in the address space (this avoids putting the burden on the driver of always setting those fields properly).

This patch also factors out a few helpers, such as evicting the first entry of the lru list or getting memory space, which avoids code duplication.

V2: Change the API to use placement flags and an array instead of packing the placement order into a quadword.
V3: Make sure we set the appropriate mem.placement flag when validating or allocating memory space.

[Pending further review by Thomas Hellstrom, but okay from preliminary review so far.]

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
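To make the new interface concrete, here is a minimal driver-side sketch (not part of this patch; the helper name, the flag combination, and the pin range are hypothetical, while the struct ttm_placement fields and the ttm_buffer_object_validate() signature are the ones introduced by the diff below):

static int example_pin_bo_in_range(struct ttm_buffer_object *bo,
				   unsigned long fpfn, unsigned long lpfn)
{
	/* Sorted from most preferred to least preferred placement. */
	uint32_t placements[] = {
		TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
		TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
	};
	struct ttm_placement placement;

	/* 0 in both fields means "anywhere in the address space";
	 * otherwise allocation is restricted to pages fpfn..lpfn. */
	placement.fpfn = fpfn;
	placement.lpfn = lpfn;
	placement.placement = placements;
	placement.num_placement = 2;
	placement.busy_placement = placements;
	placement.num_busy_placement = 2;
	return ttm_buffer_object_validate(bo, &placement, true, false);
}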
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 463
1 file changed, 221 insertions(+), 242 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index e13fd23f3334..60d8179a8bcd 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -27,6 +27,14 @@
 /*
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
+/* Notes:
+ *
+ * We store bo pointer in drm_mm_node struct so we know which bo own a
+ * specific node. There is no protection on the pointer, thus to make
+ * sure things don't go berserk you have to access this pointer while
+ * holding the global lru lock and make sure anytime you free a node you
+ * reset the pointer to NULL.
+ */
 
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
@@ -247,7 +255,6 @@ EXPORT_SYMBOL(ttm_bo_unreserve);
 /*
  * Call bo->mutex locked.
  */
-
 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -329,14 +336,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 	}
 
 	if (bo->mem.mem_type == TTM_PL_SYSTEM) {
-
-		struct ttm_mem_reg *old_mem = &bo->mem;
-		uint32_t save_flags = old_mem->placement;
-
-		*old_mem = *mem;
+		bo->mem = *mem;
 		mem->mm_node = NULL;
-		ttm_flag_masked(&save_flags, mem->placement,
-				TTM_PL_MASK_MEMTYPE);
 		goto moved;
 	}
 
@@ -419,6 +420,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 		kref_put(&bo->list_kref, ttm_bo_ref_bug);
 	}
 	if (bo->mem.mm_node) {
+		bo->mem.mm_node->private = NULL;
 		drm_mm_put_block(bo->mem.mm_node);
 		bo->mem.mm_node = NULL;
 	}
@@ -555,17 +557,14 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 }
 EXPORT_SYMBOL(ttm_bo_unref);
 
-static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
-			bool interruptible, bool no_wait)
+static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
+			bool no_wait)
 {
-	int ret = 0;
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
 	struct ttm_mem_reg evict_mem;
-	uint32_t proposed_placement;
-
-	if (bo->mem.mem_type != mem_type)
-		goto out;
+	struct ttm_placement placement;
+	int ret = 0;
 
 	spin_lock(&bo->lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
@@ -585,14 +584,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
 
-	proposed_placement = bdev->driver->evict_flags(bo);
-
-	ret = ttm_bo_mem_space(bo, proposed_placement,
-			       &evict_mem, interruptible, no_wait);
-	if (unlikely(ret != 0 && ret != -ERESTART))
-		ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
-				       &evict_mem, interruptible, no_wait);
-
+	bdev->driver->evict_flags(bo, &placement);
+	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
+			       no_wait);
 	if (ret) {
 		if (ret != -ERESTART)
 			printk(KERN_ERR TTM_PFX
@@ -606,95 +600,117 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
 	if (ret) {
 		if (ret != -ERESTART)
 			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
+		spin_lock(&glob->lru_lock);
+		if (evict_mem.mm_node) {
+			evict_mem.mm_node->private = NULL;
+			drm_mm_put_block(evict_mem.mm_node);
+			evict_mem.mm_node = NULL;
+		}
+		spin_unlock(&glob->lru_lock);
 		goto out;
 	}
+	bo->evicted = true;
+out:
+	return ret;
+}
+
+static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+				uint32_t mem_type,
+				bool interruptible, bool no_wait)
+{
+	struct ttm_bo_global *glob = bdev->glob;
+	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+	struct ttm_buffer_object *bo;
+	int ret, put_count = 0;
 
 	spin_lock(&glob->lru_lock);
-	if (evict_mem.mm_node) {
-		drm_mm_put_block(evict_mem.mm_node);
-		evict_mem.mm_node = NULL;
-	}
+	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
+	kref_get(&bo->list_kref);
+	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0);
+	if (likely(ret == 0))
+		put_count = ttm_bo_del_from_lru(bo);
 	spin_unlock(&glob->lru_lock);
-	bo->evicted = true;
-out:
+	if (unlikely(ret != 0))
+		return ret;
+	while (put_count--)
+		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+	ret = ttm_bo_evict(bo, interruptible, no_wait);
+	ttm_bo_unreserve(bo);
+	kref_put(&bo->list_kref, ttm_bo_release_list);
 	return ret;
 }
 
+static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
+				struct ttm_mem_type_manager *man,
+				struct ttm_placement *placement,
+				struct ttm_mem_reg *mem,
+				struct drm_mm_node **node)
+{
+	struct ttm_bo_global *glob = bo->glob;
+	unsigned long lpfn;
+	int ret;
+
+	lpfn = placement->lpfn;
+	if (!lpfn)
+		lpfn = man->size;
+	*node = NULL;
+	do {
+		ret = drm_mm_pre_get(&man->manager);
+		if (unlikely(ret))
+			return ret;
+
+		spin_lock(&glob->lru_lock);
+		*node = drm_mm_search_free_in_range(&man->manager,
+					mem->num_pages, mem->page_alignment,
+					placement->fpfn, lpfn, 1);
+		if (unlikely(*node == NULL)) {
+			spin_unlock(&glob->lru_lock);
+			return 0;
+		}
+		*node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
+							mem->page_alignment,
+							placement->fpfn,
+							lpfn);
+		spin_unlock(&glob->lru_lock);
+	} while (*node == NULL);
+	return 0;
+}
+
 /**
  * Repeatedly evict memory from the LRU for @mem_type until we create enough
  * space, or we've evicted everything and there isn't enough space.
  */
-static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
-				  struct ttm_mem_reg *mem,
-				  uint32_t mem_type,
-				  bool interruptible, bool no_wait)
+static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
+					uint32_t mem_type,
+					struct ttm_placement *placement,
+					struct ttm_mem_reg *mem,
+					bool interruptible, bool no_wait)
 {
+	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bdev->glob;
-	struct drm_mm_node *node;
-	struct ttm_buffer_object *entry;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-	struct list_head *lru;
-	unsigned long num_pages = mem->num_pages;
-	int put_count = 0;
+	struct drm_mm_node *node;
 	int ret;
 
-retry_pre_get:
-	ret = drm_mm_pre_get(&man->manager);
-	if (unlikely(ret != 0))
-		return ret;
-
-	spin_lock(&glob->lru_lock);
 	do {
-		node = drm_mm_search_free(&man->manager, num_pages,
-					  mem->page_alignment, 1);
+		ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
+		if (unlikely(ret != 0))
+			return ret;
 		if (node)
 			break;
-
-		lru = &man->lru;
-		if (list_empty(lru))
+		spin_lock(&glob->lru_lock);
+		if (list_empty(&man->lru)) {
+			spin_unlock(&glob->lru_lock);
 			break;
-
-		entry = list_first_entry(lru, struct ttm_buffer_object, lru);
-		kref_get(&entry->list_kref);
-
-		ret =
-		    ttm_bo_reserve_locked(entry, interruptible, no_wait,
-					  false, 0);
-
-		if (likely(ret == 0))
-			put_count = ttm_bo_del_from_lru(entry);
-
+		}
 		spin_unlock(&glob->lru_lock);
-
+		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
+						no_wait);
 		if (unlikely(ret != 0))
 			return ret;
-
-		while (put_count--)
-			kref_put(&entry->list_kref, ttm_bo_ref_bug);
-
-		ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
-
-		ttm_bo_unreserve(entry);
-
-		kref_put(&entry->list_kref, ttm_bo_release_list);
-		if (ret)
-			return ret;
-
-		spin_lock(&glob->lru_lock);
 	} while (1);
-
-	if (!node) {
-		spin_unlock(&glob->lru_lock);
+	if (node == NULL)
 		return -ENOMEM;
-	}
-
-	node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
-	if (unlikely(!node)) {
-		spin_unlock(&glob->lru_lock);
-		goto retry_pre_get;
-	}
-
-	spin_unlock(&glob->lru_lock);
 	mem->mm_node = node;
 	mem->mem_type = mem_type;
 	return 0;
@@ -725,7 +741,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
 	return result;
 }
 
-
 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 				 bool disallow_fixed,
 				 uint32_t mem_type,
@@ -749,6 +764,18 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 	return true;
 }
 
+static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
+{
+	int i;
+
+	for (i = 0; i <= TTM_PL_PRIV5; i++)
+		if (flags & (1 << i)) {
+			*mem_type = i;
+			return 0;
+		}
+	return -EINVAL;
+}
+
 /**
  * Creates space for memory region @mem according to its type.
  *
@@ -758,66 +785,55 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
  * space.
  */
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
-			uint32_t proposed_placement,
+			struct ttm_placement *placement,
 			struct ttm_mem_reg *mem,
 			bool interruptible, bool no_wait)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_global *glob = bo->glob;
 	struct ttm_mem_type_manager *man;
-
-	uint32_t num_prios = bdev->driver->num_mem_type_prio;
-	const uint32_t *prios = bdev->driver->mem_type_prio;
-	uint32_t i;
 	uint32_t mem_type = TTM_PL_SYSTEM;
 	uint32_t cur_flags = 0;
 	bool type_found = false;
 	bool type_ok = false;
 	bool has_eagain = false;
 	struct drm_mm_node *node = NULL;
-	int ret;
+	int i, ret;
 
 	mem->mm_node = NULL;
-	for (i = 0; i < num_prios; ++i) {
-		mem_type = prios[i];
+	for (i = 0; i <= placement->num_placement; ++i) {
+		ret = ttm_mem_type_from_flags(placement->placement[i],
+						&mem_type);
+		if (ret)
+			return ret;
 		man = &bdev->man[mem_type];
 
 		type_ok = ttm_bo_mt_compatible(man,
 						bo->type == ttm_bo_type_user,
-						mem_type, proposed_placement,
-						&cur_flags);
+						mem_type,
+						placement->placement[i],
+						&cur_flags);
 
 		if (!type_ok)
 			continue;
 
 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
 						  cur_flags);
+		/*
+		 * Use the access and other non-mapping-related flag bits from
+		 * the memory placement flags to the current flags
+		 */
+		ttm_flag_masked(&cur_flags, placement->placement[i],
+				~TTM_PL_MASK_MEMTYPE);
 
 		if (mem_type == TTM_PL_SYSTEM)
 			break;
 
 		if (man->has_type && man->use_type) {
 			type_found = true;
-			do {
-				ret = drm_mm_pre_get(&man->manager);
-				if (unlikely(ret))
-					return ret;
-
-				spin_lock(&glob->lru_lock);
-				node = drm_mm_search_free(&man->manager,
-							  mem->num_pages,
-							  mem->page_alignment,
-							  1);
-				if (unlikely(!node)) {
-					spin_unlock(&glob->lru_lock);
-					break;
-				}
-				node = drm_mm_get_block_atomic(node,
-							       mem->num_pages,
-							       mem->
-							       page_alignment);
-				spin_unlock(&glob->lru_lock);
-			} while (!node);
+			ret = ttm_bo_man_get_node(bo, man, placement, mem,
+							&node);
+			if (unlikely(ret))
+				return ret;
 		}
 		if (node)
 			break;
@@ -827,43 +843,48 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		mem->mm_node = node;
 		mem->mem_type = mem_type;
 		mem->placement = cur_flags;
+		if (node)
+			node->private = bo;
 		return 0;
 	}
 
 	if (!type_found)
 		return -EINVAL;
 
-	num_prios = bdev->driver->num_mem_busy_prio;
-	prios = bdev->driver->mem_busy_prio;
-
-	for (i = 0; i < num_prios; ++i) {
-		mem_type = prios[i];
+	for (i = 0; i <= placement->num_busy_placement; ++i) {
+		ret = ttm_mem_type_from_flags(placement->placement[i],
+						&mem_type);
+		if (ret)
+			return ret;
 		man = &bdev->man[mem_type];
-
 		if (!man->has_type)
 			continue;
-
 		if (!ttm_bo_mt_compatible(man,
 					  bo->type == ttm_bo_type_user,
 					  mem_type,
-					  proposed_placement, &cur_flags))
+					  placement->placement[i],
+					  &cur_flags))
 			continue;
 
 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
 						  cur_flags);
+		/*
+		 * Use the access and other non-mapping-related flag bits from
+		 * the memory placement flags to the current flags
+		 */
+		ttm_flag_masked(&cur_flags, placement->placement[i],
+				~TTM_PL_MASK_MEMTYPE);
 
-		ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
-					     interruptible, no_wait);
-
+		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
+						interruptible, no_wait);
 		if (ret == 0 && mem->mm_node) {
 			mem->placement = cur_flags;
+			mem->mm_node->private = bo;
 			return 0;
 		}
-
 		if (ret == -ERESTART)
 			has_eagain = true;
 	}
-
 	ret = (has_eagain) ? -ERESTART : -ENOMEM;
 	return ret;
 }
@@ -886,8 +907,8 @@ int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
 }
 
 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
-			uint32_t proposed_placement,
+			struct ttm_placement *placement,
 			bool interruptible, bool no_wait)
 {
 	struct ttm_bo_global *glob = bo->glob;
 	int ret = 0;
@@ -900,101 +921,82 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	 * Have the driver move function wait for idle when necessary,
 	 * instead of doing it here.
 	 */
-
 	spin_lock(&bo->lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
 	spin_unlock(&bo->lock);
-
 	if (ret)
 		return ret;
-
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
-
 	/*
 	 * Determine where to move the buffer.
 	 */
-
-	ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
-			       interruptible, no_wait);
+	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
 	if (ret)
 		goto out_unlock;
-
 	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
-
 out_unlock:
 	if (ret && mem.mm_node) {
 		spin_lock(&glob->lru_lock);
+		mem.mm_node->private = NULL;
 		drm_mm_put_block(mem.mm_node);
 		spin_unlock(&glob->lru_lock);
 	}
 	return ret;
 }
 
-static int ttm_bo_mem_compat(uint32_t proposed_placement,
+static int ttm_bo_mem_compat(struct ttm_placement *placement,
 			     struct ttm_mem_reg *mem)
 {
-	if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
-		return 0;
-	if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
-		return 0;
-
-	return 1;
+	int i;
+
+	for (i = 0; i < placement->num_placement; i++) {
+		if ((placement->placement[i] & mem->placement &
+			TTM_PL_MASK_CACHING) &&
+			(placement->placement[i] & mem->placement &
+			TTM_PL_MASK_MEM))
+			return i;
+	}
+	return -1;
 }
 
 int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
-			       uint32_t proposed_placement,
+			       struct ttm_placement *placement,
 			       bool interruptible, bool no_wait)
 {
 	int ret;
 
 	BUG_ON(!atomic_read(&bo->reserved));
-	bo->proposed_placement = proposed_placement;
-
-	TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
-		  (unsigned long)proposed_placement,
-		  (unsigned long)bo->mem.placement);
-
+	/* Check that range is valid */
+	if (placement->lpfn || placement->fpfn)
+		if (placement->fpfn > placement->lpfn ||
+			(placement->lpfn - placement->fpfn) < bo->num_pages)
+			return -EINVAL;
 	/*
 	 * Check whether we need to move buffer.
 	 */
-
-	if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
-		ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
-					 interruptible, no_wait);
-		if (ret) {
-			if (ret != -ERESTART)
-				printk(KERN_ERR TTM_PFX
-				       "Failed moving buffer. "
-				       "Proposed placement 0x%08x\n",
-				       bo->proposed_placement);
-			if (ret == -ENOMEM)
-				printk(KERN_ERR TTM_PFX
-				       "Out of aperture space or "
-				       "DRM memory quota.\n");
+	ret = ttm_bo_mem_compat(placement, &bo->mem);
+	if (ret < 0) {
+		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+		if (ret)
 			return ret;
-		}
+	} else {
+		/*
+		 * Use the access and other non-mapping-related flag bits from
+		 * the compatible memory placement flags to the active flags
+		 */
+		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
+				~TTM_PL_MASK_MEMTYPE);
 	}
-
 	/*
 	 * We might need to add a TTM.
 	 */
-
 	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
 		ret = ttm_bo_add_ttm(bo, true);
 		if (ret)
 			return ret;
 	}
-	/*
-	 * Validation has succeeded, move the access and other
-	 * non-mapping-related flag bits from the proposed flags to
-	 * the active flags
-	 */
-
-	ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
-			~TTM_PL_MASK_MEMTYPE);
-
 	return 0;
 }
 EXPORT_SYMBOL(ttm_buffer_object_validate);
@@ -1042,8 +1044,10 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
 			size_t acc_size,
 			void (*destroy) (struct ttm_buffer_object *))
 {
-	int ret = 0;
+	int i, c, ret = 0;
 	unsigned long num_pages;
+	uint32_t placements[8];
+	struct ttm_placement placement;
 
 	size += buffer_start & ~PAGE_MASK;
 	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -1100,7 +1104,16 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
 		goto out_err;
 	}
 
-	ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
+	placement.fpfn = 0;
+	placement.lpfn = 0;
+	for (i = 0, c = 0; i <= TTM_PL_PRIV5; i++)
+		if (flags & (1 << i))
+			placements[c++] = (flags & ~TTM_PL_MASK_MEM) | (1 << i);
+	placement.placement = placements;
+	placement.num_placement = c;
+	placement.busy_placement = placements;
+	placement.num_busy_placement = c;
+	ret = ttm_buffer_object_validate(bo, &placement, interruptible, false);
 	if (ret)
 		goto out_err;
 
@@ -1135,8 +1148,8 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
 			struct ttm_buffer_object **p_bo)
 {
 	struct ttm_buffer_object *bo;
-	int ret;
 	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+	int ret;
 
 	size_t acc_size =
 	    ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
@@ -1161,66 +1174,32 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
 	return ret;
 }
 
-static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
-			     uint32_t mem_type, bool allow_errors)
-{
-	int ret;
-
-	spin_lock(&bo->lock);
-	ret = ttm_bo_wait(bo, false, false, false);
-	spin_unlock(&bo->lock);
-
-	if (ret && allow_errors)
-		goto out;
-
-	if (bo->mem.mem_type == mem_type)
-		ret = ttm_bo_evict(bo, mem_type, false, false);
-
-	if (ret) {
-		if (allow_errors) {
-			goto out;
-		} else {
-			ret = 0;
-			printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
-		}
-	}
-
-out:
-	return ret;
-}
-
 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
-				   struct list_head *head,
-				   unsigned mem_type, bool allow_errors)
+				unsigned mem_type, bool allow_errors)
 {
+	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	struct ttm_bo_global *glob = bdev->glob;
-	struct ttm_buffer_object *entry;
 	int ret;
-	int put_count;
 
 	/*
 	 * Can't use standard list traversal since we're unlocking.
 	 */
 
 	spin_lock(&glob->lru_lock);
-
-	while (!list_empty(head)) {
-		entry = list_first_entry(head, struct ttm_buffer_object, lru);
-		kref_get(&entry->list_kref);
-		ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
-		put_count = ttm_bo_del_from_lru(entry);
+	while (!list_empty(&man->lru)) {
 		spin_unlock(&glob->lru_lock);
-		while (put_count--)
-			kref_put(&entry->list_kref, ttm_bo_ref_bug);
-		BUG_ON(ret);
-		ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
-		ttm_bo_unreserve(entry);
-		kref_put(&entry->list_kref, ttm_bo_release_list);
+		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+		if (ret) {
+			if (allow_errors) {
+				return ret;
+			} else {
+				printk(KERN_ERR TTM_PFX
+					"Cleanup eviction failed\n");
+			}
+		}
 		spin_lock(&glob->lru_lock);
 	}
-
 	spin_unlock(&glob->lru_lock);
-
 	return 0;
 }
 
@@ -1247,7 +1226,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 
 	ret = 0;
 	if (mem_type > 0) {
-		ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
+		ttm_bo_force_list_clean(bdev, mem_type, false);
 
 		spin_lock(&glob->lru_lock);
 		if (drm_mm_clean(&man->manager))
@@ -1280,12 +1259,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 		return 0;
 	}
 
-	return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
+	return ttm_bo_force_list_clean(bdev, mem_type, true);
 }
 EXPORT_SYMBOL(ttm_bo_evict_mm);
 
 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
-		   unsigned long p_offset, unsigned long p_size)
+			unsigned long p_size)
 {
 	int ret = -EINVAL;
 	struct ttm_mem_type_manager *man;
@@ -1315,7 +1294,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 			       type);
 			return ret;
 		}
-		ret = drm_mm_init(&man->manager, p_offset, p_size);
+		ret = drm_mm_init(&man->manager, 0, p_size);
 		if (ret)
 			return ret;
 	}
@@ -1464,7 +1443,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	 * Initialize the system memory buffer type.
 	 * Other types need to be driver / IOCTL initialized.
 	 */
-	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
+	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
 	if (unlikely(ret != 0))
 		goto out_no_sys;
 
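A note for driver maintainers following this change: ttm_bo_init_mm() has dropped its p_offset argument, so the managed range always starts at page 0, and range restrictions are now expressed through the placement's fpfn/lpfn instead. A hypothetical VRAM call site (the vram_size variable is illustrative) would change like this:

	/* before: ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, 0, vram_size >> PAGE_SHIFT); */
	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);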