author		Daniel Vetter <daniel.vetter@ffwll.ch>	2015-09-02 08:33:42 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2015-09-02 08:33:42 -0400
commit		e93c28f39375558409329a02a767d5cadfcc4a31 (patch)
tree		9f1b4b5ce765b887b6002cded59fc934e6c9c012 /drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
parent		85a62bf9d8ef8d533635270ae985281c58e8c974 (diff)
parent		6fa2d197936ba0b8936e813d0adecefac160062b (diff)
Merge tag 'drm-intel-next-fixes-2015-09-02' into drm-intel-next-queued
Backmerge -fixes since there's more DDI-E related cleanups on top of the pile of -fixes for skl that just landed for 4.3.

Conflicts:
	drivers/gpu/drm/i915/intel_display.c
	drivers/gpu/drm/i915/intel_dp.c
	drivers/gpu/drm/i915/intel_lrc.c

Conflicts are all fairly harmless adjacent line stuff.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_object.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_object.c	24
1 file changed, 5 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 8da64245b31b..57adcad2f7ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -223,18 +223,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 	size_t acc_size;
 	int r;
 
-	/* VI has a hw bug where VM PTEs have to be allocated in groups of 8.
-	 * do this as a temporary workaround
-	 */
-	if (!(domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
-		if (adev->asic_type >= CHIP_TOPAZ) {
-			if (byte_align & 0x7fff)
-				byte_align = ALIGN(byte_align, 0x8000);
-			if (size & 0x7fff)
-				size = ALIGN(size, 0x8000);
-		}
-	}
-
 	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
 	size = ALIGN(size, PAGE_SIZE);
 
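Note: the removed VI workaround rounds both the requested alignment and the size up to 32 KiB (0x8000) whenever any of the low 15 bits are set. A minimal, hedged sketch of that arithmetic, using a userspace stand-in for the kernel's ALIGN() macro (the example value is illustrative, not from the patch):

	#include <stdio.h>

	/* power-of-two round-up; matches the kernel's ALIGN() for these inputs */
	#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

	int main(void)
	{
		unsigned long size = 0x4200;		/* example: 16.5 KiB request */

		if (size & 0x7fff)			/* any low 15 bits set? */
			size = ALIGN(size, 0x8000);	/* round up to 32 KiB */

		printf("0x%lx\n", size);		/* prints 0x8000 */
		return 0;
	}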
@@ -462,7 +450,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
 {
 	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
-	if (0 && (adev->flags & AMDGPU_IS_APU)) {
+	if (0 && (adev->flags & AMD_IS_APU)) {
 		/* Useless to evict on IGP chips */
 		return 0;
 	}
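Side note on the surviving `if (0 && ...)`: the hunk only renames the flag (AMDGPU_IS_APU becomes AMD_IS_APU) inside a branch that is deliberately disabled. A hedged illustration of why that idiom beats commenting the code out (the flag value and function below are hypothetical):

	#include <stdbool.h>

	#define AMD_IS_APU (1UL << 1)	/* hypothetical bit, for illustration only */

	static bool evict_needed(unsigned long flags)
	{
		/* `0 &&` makes the branch dead code, yet the compiler still
		 * type-checks the flag expression, so the rename stays honest */
		if (0 && (flags & AMD_IS_APU))
			return false;	/* never taken */
		return true;
	}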
@@ -478,7 +466,6 @@ void amdgpu_bo_force_delete(struct amdgpu_device *adev)
 	}
 	dev_err(adev->dev, "Userspace still has active objects !\n");
 	list_for_each_entry_safe(bo, n, &adev->gem.objects, list) {
-		mutex_lock(&adev->ddev->struct_mutex);
 		dev_err(adev->dev, "%p %p %lu %lu force free\n",
 			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
 			*((unsigned long *)&bo->gem_base.refcount));
@@ -486,8 +473,7 @@ void amdgpu_bo_force_delete(struct amdgpu_device *adev)
 		list_del_init(&bo->list);
 		mutex_unlock(&bo->adev->gem.mutex);
 		/* this should unref the ttm bo */
-		drm_gem_object_unreference(&bo->gem_base);
-		mutex_unlock(&adev->ddev->struct_mutex);
+		drm_gem_object_unreference_unlocked(&bo->gem_base);
 	}
 }
 
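The dropped mutex_lock/mutex_unlock pair in the two hunks above goes with the switch to drm_gem_object_unreference_unlocked(): the unlocked variant takes dev->struct_mutex itself, and only on the final reference drop. A hedged sketch of that general idiom with hypothetical types (not the actual DRM implementation):

	#include <linux/kref.h>
	#include <linux/mutex.h>
	#include <linux/slab.h>

	/* hypothetical object, standing in for drm_gem_object */
	struct demo_obj {
		struct kref refcount;
		struct mutex *dev_lock;	/* stands in for dev->struct_mutex */
	};

	/* final-put callback; runs with dev_lock held */
	static void demo_obj_free(struct kref *kref)
	{
		kfree(container_of(kref, struct demo_obj, refcount));
	}

	static void demo_obj_unreference_unlocked(struct demo_obj *obj)
	{
		struct mutex *lock;

		if (!obj)
			return;
		lock = obj->dev_lock;	/* obj may be gone after the final put */
		/* kref_put_mutex() acquires the mutex only when this is the
		 * last reference, so callers need no lock/unlock per put */
		if (kref_put_mutex(&obj->refcount, demo_obj_free, lock))
			mutex_unlock(lock);
	}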
@@ -658,13 +644,13 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
  * @shared: true if fence should be added shared
  *
  */
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
 		     bool shared)
 {
 	struct reservation_object *resv = bo->tbo.resv;
 
 	if (shared)
-		reservation_object_add_shared_fence(resv, &fence->base);
+		reservation_object_add_shared_fence(resv, fence);
 	else
-		reservation_object_add_excl_fence(resv, &fence->base);
+		reservation_object_add_excl_fence(resv, fence);
 }
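With the new signature, amdgpu_bo_fence() takes the cross-driver struct fence directly rather than the amdgpu_fence wrapper, which is why the &fence->base dereferences disappear. A hedged usage sketch (the caller below is hypothetical, not from this patch):

	/* attach an already-obtained fence to a BO's reservation object;
	 * any struct fence works now, not just an amdgpu_fence */
	static void example_attach_fence(struct amdgpu_bo *bo, struct fence *f)
	{
		amdgpu_bo_fence(bo, f, true);	/* add as a shared fence */
	}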