about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/ttm
diff options
context:
space:
mode:
authorBen Skeggs <bskeggs@redhat.com>2010-08-04 20:48:18 -0400
committerBen Skeggs <bskeggs@redhat.com>2010-10-04 20:01:20 -0400
commitd961db75ce86a84f1f04e91ad1014653ed7d9f46 (patch)
treea827b77524fdc0c37da70936fbb0627ac7e4b492 /drivers/gpu/drm/ttm
parent42311ff90dc8746bd81427b2ed6efda9af791b77 (diff)
drm/ttm: restructure to allow driver to plug in alternate memory manager
Nouveau will need this on GeForce 8 and up to account for the GPU reordering physical VRAM for some memory types. Reviewed-by: Jerome Glisse <jglisse@redhat.com> Acked-by: Thomas Hellström <thellstrom@vmware.com> Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--drivers/gpu/drm/ttm/Makefile3
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c100
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_manager.c148
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c3
5 files changed, 174 insertions(+), 83 deletions(-)
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b256d4adfafe..f3cf6f02c997 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,6 +4,7 @@
4ccflags-y := -Iinclude/drm 4ccflags-y := -Iinclude/drm
5ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ 5ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
6 ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ 6 ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
7 ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o 7 ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
8 ttm_bo_manager.o
8 9
9obj-$(CONFIG_DRM_TTM) += ttm.o 10obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 4bf69c404491..f999e36f30b4 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -74,6 +74,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
74{ 74{
75 struct ttm_agp_backend *agp_be = 75 struct ttm_agp_backend *agp_be =
76 container_of(backend, struct ttm_agp_backend, backend); 76 container_of(backend, struct ttm_agp_backend, backend);
77 struct drm_mm_node *node = bo_mem->mm_node;
77 struct agp_memory *mem = agp_be->mem; 78 struct agp_memory *mem = agp_be->mem;
78 int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED); 79 int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
79 int ret; 80 int ret;
@@ -81,7 +82,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
81 mem->is_flushed = 1; 82 mem->is_flushed = 1;
82 mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY; 83 mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
83 84
84 ret = agp_bind_memory(mem, bo_mem->mm_node->start); 85 ret = agp_bind_memory(mem, node->start);
85 if (ret) 86 if (ret)
86 printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n"); 87 printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n");
87 88
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 80d37b460a8c..af7b57a47fbc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -84,11 +84,8 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
84 man->available_caching); 84 man->available_caching);
85 printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n", 85 printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
86 man->default_caching); 86 man->default_caching);
87 if (mem_type != TTM_PL_SYSTEM) { 87 if (mem_type != TTM_PL_SYSTEM)
88 spin_lock(&bdev->glob->lru_lock); 88 (*man->func->debug)(man, TTM_PFX);
89 drm_mm_debug_table(&man->manager, TTM_PFX);
90 spin_unlock(&bdev->glob->lru_lock);
91 }
92} 89}
93 90
94static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, 91static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
@@ -421,7 +418,7 @@ moved:
421 418
422 if (bo->mem.mm_node) { 419 if (bo->mem.mm_node) {
423 spin_lock(&bo->lock); 420 spin_lock(&bo->lock);
424 bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) + 421 bo->offset = (bo->mem.start << PAGE_SHIFT) +
425 bdev->man[bo->mem.mem_type].gpu_offset; 422 bdev->man[bo->mem.mem_type].gpu_offset;
426 bo->cur_placement = bo->mem.placement; 423 bo->cur_placement = bo->mem.placement;
427 spin_unlock(&bo->lock); 424 spin_unlock(&bo->lock);
@@ -724,52 +721,12 @@ retry:
724 return ret; 721 return ret;
725} 722}
726 723
727static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
728 struct ttm_mem_type_manager *man,
729 struct ttm_placement *placement,
730 struct ttm_mem_reg *mem,
731 struct drm_mm_node **node)
732{
733 struct ttm_bo_global *glob = bo->glob;
734 unsigned long lpfn;
735 int ret;
736
737 lpfn = placement->lpfn;
738 if (!lpfn)
739 lpfn = man->size;
740 *node = NULL;
741 do {
742 ret = drm_mm_pre_get(&man->manager);
743 if (unlikely(ret))
744 return ret;
745
746 spin_lock(&glob->lru_lock);
747 *node = drm_mm_search_free_in_range(&man->manager,
748 mem->num_pages, mem->page_alignment,
749 placement->fpfn, lpfn, 1);
750 if (unlikely(*node == NULL)) {
751 spin_unlock(&glob->lru_lock);
752 return 0;
753 }
754 *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
755 mem->page_alignment,
756 placement->fpfn,
757 lpfn);
758 spin_unlock(&glob->lru_lock);
759 } while (*node == NULL);
760 return 0;
761}
762
763void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) 724void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
764{ 725{
765 struct ttm_bo_global *glob = bo->glob; 726 struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
766 727
767 if (mem->mm_node) { 728 if (mem->mm_node)
768 spin_lock(&glob->lru_lock); 729 (*man->func->put_node)(man, mem);
769 drm_mm_put_block(mem->mm_node);
770 spin_unlock(&glob->lru_lock);
771 mem->mm_node = NULL;
772 }
773} 730}
774EXPORT_SYMBOL(ttm_bo_mem_put); 731EXPORT_SYMBOL(ttm_bo_mem_put);
775 732
@@ -788,14 +745,13 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
788 struct ttm_bo_device *bdev = bo->bdev; 745 struct ttm_bo_device *bdev = bo->bdev;
789 struct ttm_bo_global *glob = bdev->glob; 746 struct ttm_bo_global *glob = bdev->glob;
790 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 747 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
791 struct drm_mm_node *node;
792 int ret; 748 int ret;
793 749
794 do { 750 do {
795 ret = ttm_bo_man_get_node(bo, man, placement, mem, &node); 751 ret = (*man->func->get_node)(man, bo, placement, mem);
796 if (unlikely(ret != 0)) 752 if (unlikely(ret != 0))
797 return ret; 753 return ret;
798 if (node) 754 if (mem->mm_node)
799 break; 755 break;
800 spin_lock(&glob->lru_lock); 756 spin_lock(&glob->lru_lock);
801 if (list_empty(&man->lru)) { 757 if (list_empty(&man->lru)) {
@@ -808,9 +764,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
808 if (unlikely(ret != 0)) 764 if (unlikely(ret != 0))
809 return ret; 765 return ret;
810 } while (1); 766 } while (1);
811 if (node == NULL) 767 if (mem->mm_node == NULL)
812 return -ENOMEM; 768 return -ENOMEM;
813 mem->mm_node = node;
814 mem->mem_type = mem_type; 769 mem->mem_type = mem_type;
815 return 0; 770 return 0;
816} 771}
@@ -884,7 +839,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
884 bool type_found = false; 839 bool type_found = false;
885 bool type_ok = false; 840 bool type_ok = false;
886 bool has_erestartsys = false; 841 bool has_erestartsys = false;
887 struct drm_mm_node *node = NULL;
888 int i, ret; 842 int i, ret;
889 843
890 mem->mm_node = NULL; 844 mem->mm_node = NULL;
@@ -918,17 +872,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
918 872
919 if (man->has_type && man->use_type) { 873 if (man->has_type && man->use_type) {
920 type_found = true; 874 type_found = true;
921 ret = ttm_bo_man_get_node(bo, man, placement, mem, 875 ret = (*man->func->get_node)(man, bo, placement, mem);
922 &node);
923 if (unlikely(ret)) 876 if (unlikely(ret))
924 return ret; 877 return ret;
925 } 878 }
926 if (node) 879 if (mem->mm_node)
927 break; 880 break;
928 } 881 }
929 882
930 if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) { 883 if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
931 mem->mm_node = node;
932 mem->mem_type = mem_type; 884 mem->mem_type = mem_type;
933 mem->placement = cur_flags; 885 mem->placement = cur_flags;
934 return 0; 886 return 0;
@@ -998,7 +950,6 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
998 bool interruptible, bool no_wait_reserve, 950 bool interruptible, bool no_wait_reserve,
999 bool no_wait_gpu) 951 bool no_wait_gpu)
1000{ 952{
1001 struct ttm_bo_global *glob = bo->glob;
1002 int ret = 0; 953 int ret = 0;
1003 struct ttm_mem_reg mem; 954 struct ttm_mem_reg mem;
1004 955
@@ -1026,11 +977,8 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1026 goto out_unlock; 977 goto out_unlock;
1027 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu); 978 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
1028out_unlock: 979out_unlock:
1029 if (ret && mem.mm_node) { 980 if (ret && mem.mm_node)
1030 spin_lock(&glob->lru_lock); 981 ttm_bo_mem_put(bo, &mem);
1031 drm_mm_put_block(mem.mm_node);
1032 spin_unlock(&glob->lru_lock);
1033 }
1034 return ret; 982 return ret;
1035} 983}
1036 984
@@ -1038,11 +986,10 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
1038 struct ttm_mem_reg *mem) 986 struct ttm_mem_reg *mem)
1039{ 987{
1040 int i; 988 int i;
1041 struct drm_mm_node *node = mem->mm_node;
1042 989
1043 if (node && placement->lpfn != 0 && 990 if (mem->mm_node && placement->lpfn != 0 &&
1044 (node->start < placement->fpfn || 991 (mem->start < placement->fpfn ||
1045 node->start + node->size > placement->lpfn)) 992 mem->start + mem->num_pages > placement->lpfn))
1046 return -1; 993 return -1;
1047 994
1048 for (i = 0; i < placement->num_placement; i++) { 995 for (i = 0; i < placement->num_placement; i++) {
@@ -1286,7 +1233,6 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1286 1233
1287int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) 1234int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1288{ 1235{
1289 struct ttm_bo_global *glob = bdev->glob;
1290 struct ttm_mem_type_manager *man; 1236 struct ttm_mem_type_manager *man;
1291 int ret = -EINVAL; 1237 int ret = -EINVAL;
1292 1238
@@ -1309,13 +1255,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1309 if (mem_type > 0) { 1255 if (mem_type > 0) {
1310 ttm_bo_force_list_clean(bdev, mem_type, false); 1256 ttm_bo_force_list_clean(bdev, mem_type, false);
1311 1257
1312 spin_lock(&glob->lru_lock); 1258 ret = (*man->func->takedown)(man);
1313 if (drm_mm_clean(&man->manager))
1314 drm_mm_takedown(&man->manager);
1315 else
1316 ret = -EBUSY;
1317
1318 spin_unlock(&glob->lru_lock);
1319 } 1259 }
1320 1260
1321 return ret; 1261 return ret;
@@ -1366,6 +1306,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1366 ret = bdev->driver->init_mem_type(bdev, type, man); 1306 ret = bdev->driver->init_mem_type(bdev, type, man);
1367 if (ret) 1307 if (ret)
1368 return ret; 1308 return ret;
1309 man->bdev = bdev;
1369 1310
1370 ret = 0; 1311 ret = 0;
1371 if (type != TTM_PL_SYSTEM) { 1312 if (type != TTM_PL_SYSTEM) {
@@ -1375,7 +1316,8 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1375 type); 1316 type);
1376 return ret; 1317 return ret;
1377 } 1318 }
1378 ret = drm_mm_init(&man->manager, 0, p_size); 1319
1320 ret = (*man->func->init)(man, p_size);
1379 if (ret) 1321 if (ret)
1380 return ret; 1322 return ret;
1381 } 1323 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
new file mode 100644
index 000000000000..7410c190c891
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -0,0 +1,148 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#include "ttm/ttm_module.h"
32#include "ttm/ttm_bo_driver.h"
33#include "ttm/ttm_placement.h"
34#include <linux/jiffies.h>
35#include <linux/slab.h>
36#include <linux/sched.h>
37#include <linux/mm.h>
38#include <linux/file.h>
39#include <linux/module.h>
40
41static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
42 struct ttm_buffer_object *bo,
43 struct ttm_placement *placement,
44 struct ttm_mem_reg *mem)
45{
46 struct ttm_bo_global *glob = man->bdev->glob;
47 struct drm_mm *mm = man->priv;
48 struct drm_mm_node *node = NULL;
49 unsigned long lpfn;
50 int ret;
51
52 lpfn = placement->lpfn;
53 if (!lpfn)
54 lpfn = man->size;
55 do {
56 ret = drm_mm_pre_get(mm);
57 if (unlikely(ret))
58 return ret;
59
60 spin_lock(&glob->lru_lock);
61 node = drm_mm_search_free_in_range(mm,
62 mem->num_pages, mem->page_alignment,
63 placement->fpfn, lpfn, 1);
64 if (unlikely(node == NULL)) {
65 spin_unlock(&glob->lru_lock);
66 return 0;
67 }
68 node = drm_mm_get_block_atomic_range(node, mem->num_pages,
69 mem->page_alignment,
70 placement->fpfn,
71 lpfn);
72 spin_unlock(&glob->lru_lock);
73 } while (node == NULL);
74
75 mem->mm_node = node;
76 mem->start = node->start;
77 return 0;
78}
79
80static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
81 struct ttm_mem_reg *mem)
82{
83 struct ttm_bo_global *glob = man->bdev->glob;
84
85 if (mem->mm_node) {
86 spin_lock(&glob->lru_lock);
87 drm_mm_put_block(mem->mm_node);
88 spin_unlock(&glob->lru_lock);
89 mem->mm_node = NULL;
90 }
91}
92
93static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
94 unsigned long p_size)
95{
96 struct drm_mm *mm;
97 int ret;
98
99 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
100 if (!mm)
101 return -ENOMEM;
102
103 ret = drm_mm_init(mm, 0, p_size);
104 if (ret) {
105 kfree(mm);
106 return ret;
107 }
108
109 man->priv = mm;
110 return 0;
111}
112
113static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
114{
115 struct ttm_bo_global *glob = man->bdev->glob;
116 struct drm_mm *mm = man->priv;
117 int ret = 0;
118
119 spin_lock(&glob->lru_lock);
120 if (drm_mm_clean(mm)) {
121 drm_mm_takedown(mm);
122 kfree(mm);
123 man->priv = NULL;
124 } else
125 ret = -EBUSY;
126 spin_unlock(&glob->lru_lock);
127 return ret;
128}
129
130static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
131 const char *prefix)
132{
133 struct ttm_bo_global *glob = man->bdev->glob;
134 struct drm_mm *mm = man->priv;
135
136 spin_lock(&glob->lru_lock);
137 drm_mm_debug_table(mm, prefix);
138 spin_unlock(&glob->lru_lock);
139}
140
141const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
142 ttm_bo_man_init,
143 ttm_bo_man_takedown,
144 ttm_bo_man_get_node,
145 ttm_bo_man_put_node,
146 ttm_bo_man_debug
147};
148EXPORT_SYMBOL(ttm_bo_manager_func);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 0ebfe0d94931..c9d2d4d8d066 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -256,8 +256,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
256 dir = 1; 256 dir = 1;
257 257
258 if ((old_mem->mem_type == new_mem->mem_type) && 258 if ((old_mem->mem_type == new_mem->mem_type) &&
259 (new_mem->mm_node->start < 259 (new_mem->start < old_mem->start + old_mem->size)) {
260 old_mem->mm_node->start + old_mem->mm_node->size)) {
261 dir = -1; 260 dir = -1;
262 add = new_mem->num_pages - 1; 261 add = new_mem->num_pages - 1;
263 } 262 }