author    Christian König <christian.koenig@amd.com>  2014-08-27 07:16:04 -0400
committer Christian König <christian.koenig@amd.com>  2014-08-27 07:16:04 -0400
commit    f1217ed09f827e42a49ffa6a5aab672aa6f57a65 (patch)
tree      ab6a78bc2f7b0d42165eb647e13e87f92b97f149 /drivers/gpu/drm/ttm
parent    484048db6b4890bc433aac7f5e32fdcf1b2b4786 (diff)

drm/ttm: move fpfn and lpfn into each placement v2
This allows us to specify in a more fine-grained way where to place the
buffer object.

v2: rebased on drm-next, add bochs changes as well

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
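Concretely, the fpfn/lpfn page-frame window moves out of struct ttm_placement and into each entry of the placement list, which changes from a bare array of uint32_t flags into an array of struct ttm_place. Below is a minimal sketch of the reworked structures plus a hypothetical driver-side placement list; the struct layout follows the field accesses visible in the diff, and the flag values are illustrative stand-ins rather than the kernel's definitions.

#include <stdint.h>

/* Per-placement description: each candidate now carries its own
 * page-frame window in addition to the domain/caching flags. */
struct ttm_place {
	unsigned fpfn;   /* first acceptable page frame, 0 = no lower bound */
	unsigned lpfn;   /* upper bound (exclusive), 0 = no upper bound */
	uint32_t flags;  /* TTM_PL_FLAG_* domain and caching bits */
};

/* The overall placement loses its single global fpfn/lpfn pair. */
struct ttm_placement {
	unsigned num_placement;
	const struct ttm_place *placement;       /* preferred placements */
	unsigned num_busy_placement;
	const struct ttm_place *busy_placement;  /* fallbacks under pressure */
};

/* Hypothetical driver usage: prefer VRAM below pfn 0x40000 (the first
 * 1 GiB with 4 KiB pages), and accept GTT anywhere as a fallback. */
#define TTM_PL_FLAG_TT   (1u << 1)   /* stand-in value */
#define TTM_PL_FLAG_VRAM (1u << 2)   /* stand-in value */

static const struct ttm_place bo_places[] = {
	{ .fpfn = 0, .lpfn = 0x40000, .flags = TTM_PL_FLAG_VRAM },
	{ .fpfn = 0, .lpfn = 0,       .flags = TTM_PL_FLAG_TT },
};

static const struct ttm_placement bo_placement = {
	.num_placement      = 2,
	.placement          = bo_places,
	.num_busy_placement = 2,
	.busy_placement     = bo_places,
};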
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c          | 93
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_manager.c  |  9
2 files changed, 43 insertions(+), 59 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 3da89d5dab60..b992ec3c318a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -53,12 +53,13 @@ static struct attribute ttm_bo_count = {
 	.mode = S_IRUGO
 };
 
-static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
+static inline int ttm_mem_type_from_place(const struct ttm_place *place,
+					  uint32_t *mem_type)
 {
 	int i;
 
 	for (i = 0; i <= TTM_PL_PRIV5; i++)
-		if (flags & (1 << i)) {
+		if (place->flags & (1 << i)) {
 			*mem_type = i;
 			return 0;
 		}
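The renamed helper still derives the memory-type index from the lowest set domain bit; it just reads the bits out of the place now. A standalone restatement of the lookup, with the TTM_PL_PRIV5 value assumed for illustration:

#include <stdint.h>
#include <stdio.h>

#define TTM_PL_PRIV5 8  /* assumed value of the highest domain bit index */

/* The memory type is simply the index of the first set domain bit. */
static int mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (flags & (1u << i)) {
			*mem_type = i;
			return 0;
		}
	return -1;  /* -EINVAL in the kernel */
}

int main(void)
{
	uint32_t type;

	if (mem_type_from_flags(1u << 2, &type) == 0)  /* e.g. the VRAM bit */
		printf("mem_type = %u\n", type);       /* prints 2 */
	return 0;
}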
@@ -89,12 +90,12 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
 	       bo, bo->mem.num_pages, bo->mem.size >> 10,
 	       bo->mem.size >> 20);
 	for (i = 0; i < placement->num_placement; i++) {
-		ret = ttm_mem_type_from_flags(placement->placement[i],
+		ret = ttm_mem_type_from_place(&placement->placement[i],
 					      &mem_type);
 		if (ret)
 			return;
 		pr_err("  placement[%d]=0x%08X (%d)\n",
-			i, placement->placement[i], mem_type);
+			i, placement->placement[i].flags, mem_type);
 		ttm_mem_type_debug(bo->bdev, mem_type);
 	}
 }
@@ -685,8 +686,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	evict_mem.bus.io_reserved_vm = false;
 	evict_mem.bus.io_reserved_count = 0;
 
-	placement.fpfn = 0;
-	placement.lpfn = 0;
 	placement.num_placement = 0;
 	placement.num_busy_placement = 0;
 	bdev->driver->evict_flags(bo, &placement);
@@ -774,7 +773,7 @@ EXPORT_SYMBOL(ttm_bo_mem_put);
  */
 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 					uint32_t mem_type,
-					struct ttm_placement *placement,
+					const struct ttm_place *place,
 					struct ttm_mem_reg *mem,
 					bool interruptible,
 					bool no_wait_gpu)
@@ -784,7 +783,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 	int ret;
 
 	do {
-		ret = (*man->func->get_node)(man, bo, placement, 0, mem);
+		ret = (*man->func->get_node)(man, bo, place, mem);
 		if (unlikely(ret != 0))
 			return ret;
 		if (mem->mm_node)
@@ -827,18 +826,18 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
 
 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 				 uint32_t mem_type,
-				 uint32_t proposed_placement,
+				 const struct ttm_place *place,
 				 uint32_t *masked_placement)
 {
 	uint32_t cur_flags = ttm_bo_type_flags(mem_type);
 
-	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
+	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
 		return false;
 
-	if ((proposed_placement & man->available_caching) == 0)
+	if ((place->flags & man->available_caching) == 0)
 		return false;
 
-	cur_flags |= (proposed_placement & man->available_caching);
+	cur_flags |= (place->flags & man->available_caching);
 
 	*masked_placement = cur_flags;
 	return true;
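The compatibility test is unchanged in substance: the place's domain bits must intersect the memory type's, and the place must request at least one caching mode the manager offers. A userspace restatement, with illustrative mask values standing in for TTM_PL_MASK_MEM and TTM_PL_MASK_CACHING:

#include <stdbool.h>
#include <stdint.h>

#define PL_MASK_MEM     0x0000ffffu  /* stand-in for TTM_PL_MASK_MEM */
#define PL_MASK_CACHING 0x00070000u  /* stand-in for TTM_PL_MASK_CACHING */

/* Returns true, and the combined flags through *masked, when
 * place_flags can be satisfied by a memory type with the given type
 * flags and available caching modes. */
static bool mt_compatible(uint32_t type_flags, uint32_t available_caching,
                          uint32_t place_flags, uint32_t *masked)
{
	uint32_t cur_flags = type_flags;

	if ((cur_flags & place_flags & PL_MASK_MEM) == 0)
		return false;  /* no common memory domain */

	if ((place_flags & available_caching) == 0)
		return false;  /* no caching mode both sides accept */

	cur_flags |= place_flags & available_caching;
	*masked = cur_flags;
	return true;
}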
@@ -869,15 +868,14 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
 	mem->mm_node = NULL;
 	for (i = 0; i < placement->num_placement; ++i) {
-		ret = ttm_mem_type_from_flags(placement->placement[i],
-					      &mem_type);
+		const struct ttm_place *place = &placement->placement[i];
+
+		ret = ttm_mem_type_from_place(place, &mem_type);
 		if (ret)
 			return ret;
 		man = &bdev->man[mem_type];
 
-		type_ok = ttm_bo_mt_compatible(man,
-					       mem_type,
-					       placement->placement[i],
+		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
 					       &cur_flags);
 
 		if (!type_ok)
@@ -889,7 +887,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		 * Use the access and other non-mapping-related flag bits from
 		 * the memory placement flags to the current flags
 		 */
-		ttm_flag_masked(&cur_flags, placement->placement[i],
+		ttm_flag_masked(&cur_flags, place->flags,
 				~TTM_PL_MASK_MEMTYPE);
 
 		if (mem_type == TTM_PL_SYSTEM)
@@ -897,8 +895,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
 		if (man->has_type && man->use_type) {
 			type_found = true;
-			ret = (*man->func->get_node)(man, bo, placement,
-						     cur_flags, mem);
+			ret = (*man->func->get_node)(man, bo, place, mem);
 			if (unlikely(ret))
 				return ret;
 		}
@@ -916,17 +913,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		return -EINVAL;
 
 	for (i = 0; i < placement->num_busy_placement; ++i) {
-		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
-					      &mem_type);
+		const struct ttm_place *place = &placement->busy_placement[i];
+
+		ret = ttm_mem_type_from_place(place, &mem_type);
 		if (ret)
 			return ret;
 		man = &bdev->man[mem_type];
 		if (!man->has_type)
 			continue;
-		if (!ttm_bo_mt_compatible(man,
-					  mem_type,
-					  placement->busy_placement[i],
-					  &cur_flags))
+		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
 			continue;
 
 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
@@ -935,7 +930,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		 * Use the access and other non-mapping-related flag bits from
 		 * the memory placement flags to the current flags
 		 */
-		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
+		ttm_flag_masked(&cur_flags, place->flags,
 				~TTM_PL_MASK_MEMTYPE);
 
 		if (mem_type == TTM_PL_SYSTEM) {
@@ -945,7 +940,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 			return 0;
 		}
 
-		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
+		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
 					     interruptible, no_wait_gpu);
 		if (ret == 0 && mem->mm_node) {
 			mem->placement = cur_flags;
@@ -1006,20 +1001,27 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement,
 {
 	int i;
 
-	if (mem->mm_node && placement->lpfn != 0 &&
-	    (mem->start < placement->fpfn ||
-	     mem->start + mem->num_pages > placement->lpfn))
-		return false;
-
 	for (i = 0; i < placement->num_placement; i++) {
-		*new_flags = placement->placement[i];
+		const struct ttm_place *heap = &placement->placement[i];
+		if (mem->mm_node && heap->lpfn != 0 &&
+		    (mem->start < heap->fpfn ||
+		     mem->start + mem->num_pages > heap->lpfn))
+			continue;
+
+		*new_flags = heap->flags;
 		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
 		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
 			return true;
 	}
 
 	for (i = 0; i < placement->num_busy_placement; i++) {
-		*new_flags = placement->busy_placement[i];
+		const struct ttm_place *heap = &placement->busy_placement[i];
+		if (mem->mm_node && heap->lpfn != 0 &&
+		    (mem->start < heap->fpfn ||
+		     mem->start + mem->num_pages > heap->lpfn))
+			continue;
+
+		*new_flags = heap->flags;
 		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
 		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
 			return true;
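Note the behavioral change in this hunk: the old code rejected the buffer outright as soon as the single global range was violated, while the new code merely skips a heap whose window does not contain the current allocation, so a later entry in the list can still declare the buffer compatible. The per-heap test, restated standalone:

#include <stdbool.h>

/* A resident region [start, start + num_pages) satisfies a heap only
 * if it lies inside [fpfn, lpfn); lpfn == 0 means no upper bound.
 * Heaps that fail this test are skipped, not treated as fatal. */
static bool heap_range_ok(unsigned long start, unsigned long num_pages,
                          unsigned fpfn, unsigned lpfn)
{
	if (lpfn != 0 && (start < fpfn || start + num_pages > lpfn))
		return false;
	return true;
}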
@@ -1037,11 +1039,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	uint32_t new_flags;
 
 	lockdep_assert_held(&bo->resv->lock.base);
-	/* Check that range is valid */
-	if (placement->lpfn || placement->fpfn)
-		if (placement->fpfn > placement->lpfn ||
-		    (placement->lpfn - placement->fpfn) < bo->num_pages)
-			return -EINVAL;
 	/*
 	 * Check whether we need to move buffer.
 	 */
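With the range attached to each place there is no longer a single fpfn/lpfn pair to sanity-check here; an impossible window now simply fails at allocation time, when the range manager cannot find space inside it. The dropped invariant, restated per place:

#include <stdbool.h>

/* The removed ttm_bo_validate() check, per place: a bounded window
 * can only ever hold the buffer if it is at least num_pages long. */
static bool place_can_fit(unsigned fpfn, unsigned lpfn,
                          unsigned long num_pages)
{
	if (lpfn == 0)
		return true;   /* unbounded above, always large enough */
	if (fpfn > lpfn)
		return false;  /* inverted window */
	return (unsigned long)(lpfn - fpfn) >= num_pages;
}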
@@ -1070,15 +1067,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_validate);
 
-int ttm_bo_check_placement(struct ttm_buffer_object *bo,
-			   struct ttm_placement *placement)
-{
-	BUG_ON((placement->fpfn || placement->lpfn) &&
-	       (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
-
-	return 0;
-}
-
 int ttm_bo_init(struct ttm_bo_device *bdev,
 		struct ttm_buffer_object *bo,
 		unsigned long size,
@@ -1147,15 +1135,12 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	atomic_inc(&bo->glob->bo_count);
 	drm_vma_node_reset(&bo->vma_node);
 
-	ret = ttm_bo_check_placement(bo, placement);
-
 	/*
 	 * For ttm_bo_type_device buffers, allocate
 	 * address space from the device.
 	 */
-	if (likely(!ret) &&
-	    (bo->type == ttm_bo_type_device ||
-	     bo->type == ttm_bo_type_sg))
+	if (bo->type == ttm_bo_type_device ||
+	    bo->type == ttm_bo_type_sg)
 		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
 					 bo->mem.num_pages);
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 9e103a4875c8..964387fc5c8f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -49,8 +49,7 @@ struct ttm_range_manager {
 
 static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 			       struct ttm_buffer_object *bo,
-			       struct ttm_placement *placement,
-			       uint32_t flags,
+			       const struct ttm_place *place,
 			       struct ttm_mem_reg *mem)
 {
 	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
@@ -60,7 +59,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 	unsigned long lpfn;
 	int ret;
 
-	lpfn = placement->lpfn;
+	lpfn = place->lpfn;
 	if (!lpfn)
 		lpfn = man->size;
 
@@ -68,13 +67,13 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 	if (!node)
 		return -ENOMEM;
 
-	if (flags & TTM_PL_FLAG_TOPDOWN)
+	if (place->flags & TTM_PL_FLAG_TOPDOWN)
 		aflags = DRM_MM_CREATE_TOP;
 
 	spin_lock(&rman->lock);
 	ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
 						  mem->page_alignment, 0,
-						  placement->fpfn, lpfn,
+						  place->fpfn, lpfn,
 						  DRM_MM_SEARCH_BEST,
 						  aflags);
 	spin_unlock(&rman->lock);
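In the range manager the place is now consumed directly: fpfn/lpfn bound the drm_mm search window, lpfn == 0 widens the search to the whole managed range, and TTM_PL_FLAG_TOPDOWN selects top-down packing. A sketch of the effective-window computation, with the flag value assumed for illustration:

#include <stdbool.h>
#include <stdint.h>

#define TTM_PL_FLAG_TOPDOWN (1u << 22)  /* assumed bit, for illustration */

struct search_window {
	unsigned long start;  /* first page frame drm_mm may use */
	unsigned long end;    /* one past the last usable page frame */
	bool topdown;         /* allocate from the top of the window */
};

/* Derive the window ttm_bo_man_get_node() hands to drm_mm. */
static struct search_window effective_window(unsigned fpfn, unsigned lpfn,
                                             unsigned long man_size,
                                             uint32_t flags)
{
	struct search_window w;

	w.start   = fpfn;
	w.end     = lpfn ? lpfn : man_size;  /* 0 means "to the end" */
	w.topdown = (flags & TTM_PL_FLAG_TOPDOWN) != 0;
	return w;
}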