author		Michel Dänzer <daenzer@vmware.com>	2010-07-07 22:43:28 -0400
committer	Dave Airlie <airlied@redhat.com>	2010-08-01 20:00:08 -0400
commit		e376573f7267390f4e1bdc552564b6fb913bce76 (patch)
tree		2ce33e85ac5ca32c03006dd81a2f6fec52b0227f
parent		351a52a2414d2b104269755c86b476863c248034 (diff)
drm/radeon: fall back to GTT if bo creation/validation in VRAM fails.
This fixes a problem where on low VRAM cards we'd run out of space for validation.
[airlied: Tested on my M7, Thinkpad T42, compiz works with no problems.]
Signed-off-by: Michel Dänzer <daenzer@vmware.com>
Cc: stable@kernel.org
Signed-off-by: Dave Airlie <airlied@redhat.com>
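
The pattern the patch adds is the same in both radeon_bo_create() and radeon_bo_list_validate(): if placing a buffer object in VRAM fails with anything other than -ERESTARTSYS, widen the allowed domain to VRAM|GTT and jump back to a retry: label for one more attempt. The standalone sketch below models only that control flow; try_place(), the DOMAIN_* flags and the use of -EINTR are illustrative stand-ins, not the real radeon/TTM interfaces.

/*
 * Minimal sketch of the VRAM -> GTT fallback this patch introduces.
 * try_place() and the domain flags are hypothetical; they stand in
 * for ttm_bo_init()/ttm_bo_validate() and RADEON_GEM_DOMAIN_*.
 */
#include <stdio.h>
#include <errno.h>

#define DOMAIN_VRAM 0x1u
#define DOMAIN_GTT  0x2u

/* Pretend VRAM is exhausted: only placements that allow GTT succeed. */
static int try_place(unsigned int domain)
{
	if (domain & DOMAIN_GTT)
		return 0;
	return -ENOMEM;
}

static int place_with_fallback(unsigned int domain)
{
	int r;

retry:
	r = try_place(domain);
	if (r != 0) {
		/* Interrupted calls are not retried (mirrors -ERESTARTSYS). */
		if (r != -EINTR && domain == DOMAIN_VRAM) {
			domain |= DOMAIN_GTT;
			goto retry;	/* second attempt may land in GTT */
		}
		return r;
	}
	return 0;
}

int main(void)
{
	printf("VRAM only : %d\n", place_with_fallback(DOMAIN_VRAM));
	printf("GTT only  : %d\n", place_with_fallback(DOMAIN_GTT));
	return 0;
}

Because the retried domain is VRAM|GTT rather than VRAM alone, the goto can fire at most once per object, so the fallback cannot loop.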
-rw-r--r--	drivers/gpu/drm/radeon/radeon_object.c | 27
1 file changed, 18 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d5b9373ce06c..0afd1e62347d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -110,6 +110,7 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
 	bo->surface_reg = -1;
 	INIT_LIST_HEAD(&bo->list);
 
+retry:
 	radeon_ttm_placement_from_domain(bo, domain);
 	/* Kernel allocation are uninterruptible */
 	mutex_lock(&rdev->vram_mutex);
@@ -118,10 +119,15 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
 			&radeon_ttm_bo_destroy);
 	mutex_unlock(&rdev->vram_mutex);
 	if (unlikely(r != 0)) {
-		if (r != -ERESTARTSYS)
+		if (r != -ERESTARTSYS) {
+			if (domain == RADEON_GEM_DOMAIN_VRAM) {
+				domain |= RADEON_GEM_DOMAIN_GTT;
+				goto retry;
+			}
 			dev_err(rdev->dev,
 				"object_init failed for (%lu, 0x%08X)\n",
 				size, domain);
+		}
 		return r;
 	}
 	*bo_ptr = bo;
@@ -321,6 +327,7 @@ int radeon_bo_list_validate(struct list_head *head)
 {
 	struct radeon_bo_list *lobj;
 	struct radeon_bo *bo;
+	u32 domain;
 	int r;
 
 	list_for_each_entry(lobj, head, list) {
@@ -333,17 +340,19 @@ int radeon_bo_list_validate(struct list_head *head)
 	list_for_each_entry(lobj, head, list) {
 		bo = lobj->bo;
 		if (!bo->pin_count) {
-			if (lobj->wdomain) {
-				radeon_ttm_placement_from_domain(bo,
-								lobj->wdomain);
-			} else {
-				radeon_ttm_placement_from_domain(bo,
-								lobj->rdomain);
-			}
+			domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
+
+		retry:
+			radeon_ttm_placement_from_domain(bo, domain);
 			r = ttm_bo_validate(&bo->tbo, &bo->placement,
 						true, false, false);
-			if (unlikely(r))
+			if (unlikely(r)) {
+				if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
+					domain |= RADEON_GEM_DOMAIN_GTT;
+					goto retry;
+				}
 				return r;
+			}
 		}
 		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
 		lobj->tiling_flags = bo->tiling_flags;