aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
diff options
context:
space:
mode:
authorChristian König <christian.koenig@amd.com>2017-10-16 10:50:32 -0400
committerAlex Deucher <alexander.deucher@amd.com>2017-12-04 16:41:33 -0500
commitc1c7ce8f5687bb01b2eb0db3c19cb375267bb16d (patch)
tree1f084782b78db4224fc55dacf8246816b36508ab /drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
parent3da917b6c6843ad0162e9768c40a83b6c4448646 (diff)
drm/amdgpu: move GART recovery into GTT manager v2
The GTT manager handles the GART address space anyway, so it is completely pointless to keep the same information around twice. v2: rebased Signed-off-by: Christian König <christian.koenig@amd.com> Reviewed-by: Chunming Zhou <david1.zhou@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c53
1 file changed, 39 insertions, 14 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index f7669dc6909b..e14ab34d8262 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -31,6 +31,11 @@ struct amdgpu_gtt_mgr {
31 atomic64_t available; 31 atomic64_t available;
32}; 32};
33 33
34struct amdgpu_gtt_node {
35 struct drm_mm_node node;
36 struct ttm_buffer_object *tbo;
37};
38
34/** 39/**
35 * amdgpu_gtt_mgr_init - init GTT manager and DRM MM 40 * amdgpu_gtt_mgr_init - init GTT manager and DRM MM
36 * 41 *
@@ -87,9 +92,9 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
87 */ 92 */
88bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem) 93bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)
89{ 94{
90 struct drm_mm_node *node = mem->mm_node; 95 struct amdgpu_gtt_node *node = mem->mm_node;
91 96
92 return (node->start != AMDGPU_BO_INVALID_OFFSET); 97 return (node->node.start != AMDGPU_BO_INVALID_OFFSET);
93} 98}
94 99
95/** 100/**
@@ -109,7 +114,7 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
109{ 114{
110 struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); 115 struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
111 struct amdgpu_gtt_mgr *mgr = man->priv; 116 struct amdgpu_gtt_mgr *mgr = man->priv;
112 struct drm_mm_node *node = mem->mm_node; 117 struct amdgpu_gtt_node *node = mem->mm_node;
113 enum drm_mm_insert_mode mode; 118 enum drm_mm_insert_mode mode;
114 unsigned long fpfn, lpfn; 119 unsigned long fpfn, lpfn;
115 int r; 120 int r;
@@ -132,13 +137,13 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
132 mode = DRM_MM_INSERT_HIGH; 137 mode = DRM_MM_INSERT_HIGH;
133 138
134 spin_lock(&mgr->lock); 139 spin_lock(&mgr->lock);
135 r = drm_mm_insert_node_in_range(&mgr->mm, node, 140 r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
136 mem->num_pages, mem->page_alignment, 0, 141 mem->page_alignment, 0, fpfn, lpfn,
137 fpfn, lpfn, mode); 142 mode);
138 spin_unlock(&mgr->lock); 143 spin_unlock(&mgr->lock);
139 144
140 if (!r) 145 if (!r)
141 mem->start = node->start; 146 mem->start = node->node.start;
142 147
143 return r; 148 return r;
144} 149}
@@ -159,7 +164,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
159 struct ttm_mem_reg *mem) 164 struct ttm_mem_reg *mem)
160{ 165{
161 struct amdgpu_gtt_mgr *mgr = man->priv; 166 struct amdgpu_gtt_mgr *mgr = man->priv;
162 struct drm_mm_node *node; 167 struct amdgpu_gtt_node *node;
163 int r; 168 int r;
164 169
165 spin_lock(&mgr->lock); 170 spin_lock(&mgr->lock);
@@ -177,8 +182,9 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
177 goto err_out; 182 goto err_out;
178 } 183 }
179 184
180 node->start = AMDGPU_BO_INVALID_OFFSET; 185 node->node.start = AMDGPU_BO_INVALID_OFFSET;
181 node->size = mem->num_pages; 186 node->node.size = mem->num_pages;
187 node->tbo = tbo;
182 mem->mm_node = node; 188 mem->mm_node = node;
183 189
184 if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) { 190 if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
@@ -190,7 +196,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
190 goto err_out; 196 goto err_out;
191 } 197 }
192 } else { 198 } else {
193 mem->start = node->start; 199 mem->start = node->node.start;
194 } 200 }
195 201
196 return 0; 202 return 0;
@@ -214,14 +220,14 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
214 struct ttm_mem_reg *mem) 220 struct ttm_mem_reg *mem)
215{ 221{
216 struct amdgpu_gtt_mgr *mgr = man->priv; 222 struct amdgpu_gtt_mgr *mgr = man->priv;
217 struct drm_mm_node *node = mem->mm_node; 223 struct amdgpu_gtt_node *node = mem->mm_node;
218 224
219 if (!node) 225 if (!node)
220 return; 226 return;
221 227
222 spin_lock(&mgr->lock); 228 spin_lock(&mgr->lock);
223 if (node->start != AMDGPU_BO_INVALID_OFFSET) 229 if (node->node.start != AMDGPU_BO_INVALID_OFFSET)
224 drm_mm_remove_node(node); 230 drm_mm_remove_node(&node->node);
225 spin_unlock(&mgr->lock); 231 spin_unlock(&mgr->lock);
226 atomic64_add(mem->num_pages, &mgr->available); 232 atomic64_add(mem->num_pages, &mgr->available);
227 233
@@ -244,6 +250,25 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
244 return (result > 0 ? result : 0) * PAGE_SIZE; 250 return (result > 0 ? result : 0) * PAGE_SIZE;
245} 251}
246 252
253int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man)
254{
255 struct amdgpu_gtt_mgr *mgr = man->priv;
256 struct amdgpu_gtt_node *node;
257 struct drm_mm_node *mm_node;
258 int r = 0;
259
260 spin_lock(&mgr->lock);
261 drm_mm_for_each_node(mm_node, &mgr->mm) {
262 node = container_of(mm_node, struct amdgpu_gtt_node, node);
263 r = amdgpu_ttm_recover_gart(node->tbo);
264 if (r)
265 break;
266 }
267 spin_unlock(&mgr->lock);
268
269 return r;
270}
271
247/** 272/**
248 * amdgpu_gtt_mgr_debug - dump VRAM table 273 * amdgpu_gtt_mgr_debug - dump VRAM table
249 * 274 *