Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_object.c')
 drivers/gpu/drm/radeon/radeon_object.c | 90
 1 file changed, 45 insertions(+), 45 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index bec494384825..2040937682fd 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -75,6 +75,25 @@ static inline u32 radeon_ttm_flags_from_domain(u32 domain)
 	return flags;
 }
 
+void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
+{
+	u32 c = 0;
+
+	rbo->placement.fpfn = 0;
+	rbo->placement.lpfn = 0;
+	rbo->placement.placement = rbo->placements;
+	rbo->placement.busy_placement = rbo->placements;
+	if (domain & RADEON_GEM_DOMAIN_VRAM)
+		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+					TTM_PL_FLAG_VRAM;
+	if (domain & RADEON_GEM_DOMAIN_GTT)
+		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	if (domain & RADEON_GEM_DOMAIN_CPU)
+		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	rbo->placement.num_placement = c;
+	rbo->placement.num_busy_placement = c;
+}
+
 int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
 		unsigned long size, bool kernel, u32 domain,
 		struct radeon_bo **bo_ptr)
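The helper added above is the core of the conversion: callers describe where a buffer may live through bo->placement instead of a raw TTM flags word. A minimal sketch of the intended calling pattern, mirroring the later hunks (the wrapper function is invented for illustration and is not part of this patch):

/* Illustration only: migrate a BO to `domain` with the new placement-based
 * API. Assumes the radeon/TTM definitions from this tree (struct radeon_bo,
 * ttm_buffer_object_validate) are in scope, e.g. via "radeon.h".
 */
static int example_move_to_domain(struct radeon_bo *rbo, u32 domain)
{
	int r;

	/* Build rbo->placement / rbo->placements[] from the domain mask. */
	radeon_ttm_placement_from_domain(rbo, domain);
	/* Ask TTM to (re)validate the BO against that placement list. */
	r = ttm_buffer_object_validate(&rbo->tbo, &rbo->placement,
				       true, false);
	if (unlikely(r))
		dev_err(rbo->rdev->dev, "%p validate failed\n", rbo);
	return r;
}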
@@ -102,16 +121,15 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
 	INIT_LIST_HEAD(&bo->list);
 
 	flags = radeon_ttm_flags_from_domain(domain);
-retry:
+	/* Kernel allocation are uninterruptible */
 	r = ttm_buffer_object_init(&rdev->mman.bdev, &bo->tbo, size, type,
-				flags, 0, 0, true, NULL, size,
+				flags, 0, 0, !kernel, NULL, size,
 				&radeon_ttm_bo_destroy);
 	if (unlikely(r != 0)) {
-		if (r == -ERESTART)
-			goto retry;
-		/* ttm call radeon_ttm_object_object_destroy if error happen */
-		dev_err(rdev->dev, "object_init failed for (%ld, 0x%08X)\n",
-			size, flags);
+		if (r != -ERESTARTSYS)
+			dev_err(rdev->dev,
+				"object_init failed for (%ld, 0x%08X)\n",
+				size, flags);
 		return r;
 	}
 	*bo_ptr = bo;
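Two behavioural changes sit in this hunk: ttm_buffer_object_init() is now called with interruptible = !kernel, so kernel-internal allocations cannot be interrupted, and -ERESTARTSYS is no longer retried inside the driver; it is returned silently (only other failures are logged) so the system call can be restarted. A caller-side sketch, with the function name invented for illustration:

/* Illustration only: a userspace-triggered allocation passes kernel == false,
 * so waits inside TTM stay interruptible and an -ERESTARTSYS return simply
 * propagates back to the ioctl layer instead of being retried here.
 */
static int example_userspace_alloc(struct radeon_device *rdev,
				   unsigned long size,
				   struct radeon_bo **bo)
{
	/* No GEM object attached in this sketch, hence the NULL gobj. */
	return radeon_bo_create(rdev, NULL, size, false,
				RADEON_GEM_DOMAIN_GTT, bo);
}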
@@ -169,40 +187,32 @@ void radeon_bo_unref(struct radeon_bo **bo)
 
 int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
 {
-	u32 flags;
-	u32 tmp;
-	int r;
+	int r, i;
 
-	flags = radeon_ttm_flags_from_domain(domain);
+	radeon_ttm_placement_from_domain(bo, domain);
 	if (bo->pin_count) {
 		bo->pin_count++;
 		if (gpu_addr)
 			*gpu_addr = radeon_bo_gpu_offset(bo);
 		return 0;
 	}
-	tmp = bo->tbo.mem.placement;
-	ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
-	bo->tbo.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT |
-		TTM_PL_MASK_CACHING;
-retry:
-	r = ttm_buffer_object_validate(&bo->tbo, bo->tbo.proposed_placement,
-					true, false);
+	radeon_ttm_placement_from_domain(bo, domain);
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+	r = ttm_buffer_object_validate(&bo->tbo, &bo->placement, false, false);
 	if (likely(r == 0)) {
 		bo->pin_count = 1;
 		if (gpu_addr != NULL)
 			*gpu_addr = radeon_bo_gpu_offset(bo);
 	}
-	if (unlikely(r != 0)) {
-		if (r == -ERESTART)
-			goto retry;
+	if (unlikely(r != 0))
 		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
-	}
 	return r;
 }
 
 int radeon_bo_unpin(struct radeon_bo *bo)
 {
-	int r;
+	int r, i;
 
 	if (!bo->pin_count) {
 		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
@@ -211,18 +221,12 @@ int radeon_bo_unpin(struct radeon_bo *bo)
 	bo->pin_count--;
 	if (bo->pin_count)
 		return 0;
-	bo->tbo.proposed_placement = bo->tbo.mem.placement &
-		~TTM_PL_FLAG_NO_EVICT;
-retry:
-	r = ttm_buffer_object_validate(&bo->tbo, bo->tbo.proposed_placement,
-					true, false);
-	if (unlikely(r != 0)) {
-		if (r == -ERESTART)
-			goto retry;
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+	r = ttm_buffer_object_validate(&bo->tbo, &bo->placement, false, false);
+	if (unlikely(r != 0))
 		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
-		return r;
-	}
-	return 0;
+	return r;
 }
 
 int radeon_bo_evict_vram(struct radeon_device *rdev)
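With the placement-based flow, pin and unpin set or clear TTM_PL_FLAG_NO_EVICT on every placement entry and validate against &bo->placement; the driver-side retry loop on -ERESTART is gone here as well. A short usage sketch against the signatures shown above (function name invented, error handling trimmed):

/* Illustration only: pin a BO into VRAM, read back its GPU offset, and
 * drop the pin when finished.
 */
static int example_pin_cycle(struct radeon_bo *bo)
{
	u64 gpu_addr;
	int r;

	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
	if (r)
		return r;
	/* ... use gpu_addr while the pin keeps the BO resident ... */
	return radeon_bo_unpin(bo);
}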
@@ -326,21 +330,17 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
 		bo = lobj->bo;
 		if (!bo->pin_count) {
 			if (lobj->wdomain) {
-				bo->tbo.proposed_placement =
-					radeon_ttm_flags_from_domain(lobj->wdomain);
+				radeon_ttm_placement_from_domain(bo,
+								lobj->wdomain);
 			} else {
-				bo->tbo.proposed_placement =
-					radeon_ttm_flags_from_domain(lobj->rdomain);
+				radeon_ttm_placement_from_domain(bo,
+								lobj->rdomain);
 			}
-retry:
 			r = ttm_buffer_object_validate(&bo->tbo,
-						bo->tbo.proposed_placement,
+						&bo->placement,
 						true, false);
-			if (unlikely(r)) {
-				if (r == -ERESTART)
-					goto retry;
+			if (unlikely(r))
 				return r;
-			}
 		}
 		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
 		lobj->tiling_flags = bo->tiling_flags;
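The command-submission validate loop follows the same pattern: the write domain, when present, takes precedence over the read domain when building the placement, and validation errors (including -ERESTARTSYS) are returned to the caller rather than retried. A sketch of that per-object choice, matching the hunk above (the helper name is invented for illustration):

/* Illustration only: pick the placement for one reloc entry the way the
 * hunk above does - the write domain wins over the read domain.
 */
static void example_choose_placement(struct radeon_bo *bo,
				     u32 wdomain, u32 rdomain)
{
	if (wdomain)
		radeon_ttm_placement_from_domain(bo, wdomain);
	else
		radeon_ttm_placement_from_domain(bo, rdomain);
}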
@@ -378,7 +378,7 @@ int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
 	return ttm_fbdev_mmap(vma, &bo->tbo);
 }
 
-static int radeon_bo_get_surface_reg(struct radeon_bo *bo)
+int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 {
 	struct radeon_device *rdev = bo->rdev;
 	struct radeon_surface_reg *reg;