aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDaniel Vetter <daniel.vetter@ffwll.ch>2010-11-04 12:11:09 -0400
committerChris Wilson <chris@chris-wilson.co.uk>2010-11-04 15:02:03 -0400
commit75e9e9158f38e5cb21eff23b30bafa6f32e0a606 (patch)
treec52330f275112d47762a0ef0bde1efb5a82e748f
parent818f2a3cc34b0673dccd4188ce4a1862d9d90127 (diff)
drm/i915: kill mappable/fenceable distinction
a00b10c360b35d6431a "Only enforce fence limits inside the GTT" also added a fenceable/mappable distinction when binding/pinning buffers. This only complicates the code with no practical gain: - In execbuffer this matters only for g33/pineview, as this is the only chip that needs fences and has an unmappable gtt area. But fences are only possible in the mappable part of the gtt, so need_fence implies need_mappable. And need_mappable is only set independently with relocations which implies (for sane userspace) that the buffer is untiled. - The overlay code is only really used on i8xx, which doesn't have unmappable gtt. And it doesn't support tiled buffers, currently. - For all other buffers it's a bug to pass in a tiled bo. In short, this distinction doesn't have any practical gain. I've also reverted mapping the overlay and context pages as possibly unmappable. It's not worth being overtly clever here, all the big gains from unmappable are for execbuf bos. Also add a comment for a clever optimization that confused me while reading the original patch by Chris Wilson. Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h10
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c84
-rw-r--r--drivers/gpu/drm/i915/intel_display.c7
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c4
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c6
5 files changed, 56 insertions, 55 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 48d0aefec1f8..621234265454 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -752,8 +752,6 @@ struct drm_i915_gem_object {
752 * Advice: are the backing pages purgeable? 752 * Advice: are the backing pages purgeable?
753 */ 753 */
754 unsigned int madv : 2; 754 unsigned int madv : 2;
755 unsigned int fenceable : 1;
756 unsigned int mappable : 1;
757 755
758 /** 756 /**
759 * Current tiling mode for the object. 757 * Current tiling mode for the object.
@@ -773,6 +771,12 @@ struct drm_i915_gem_object {
773#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf 771#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
774 772
775 /** 773 /**
774 * Is the object at the current location in the gtt mappable and
775 * fenceable? Used to avoid costly recalculations.
776 */
777 unsigned int map_and_fenceable : 1;
778
779 /**
776 * Whether the current gtt mapping needs to be mappable (and isn't just 780 * Whether the current gtt mapping needs to be mappable (and isn't just
777 * mappable by accident). Track pin and fault separate for a more 781 * mappable by accident). Track pin and fault separate for a more
778 * accurate mappable working set. 782 * accurate mappable working set.
@@ -1013,7 +1017,7 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
1013 size_t size); 1017 size_t size);
1014void i915_gem_free_object(struct drm_gem_object *obj); 1018void i915_gem_free_object(struct drm_gem_object *obj);
1015int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, 1019int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment,
1016 bool mappable, bool need_fence); 1020 bool map_and_fenceable);
1017void i915_gem_object_unpin(struct drm_gem_object *obj); 1021void i915_gem_object_unpin(struct drm_gem_object *obj);
1018int i915_gem_object_unbind(struct drm_gem_object *obj); 1022int i915_gem_object_unbind(struct drm_gem_object *obj);
1019void i915_gem_release_mmap(struct drm_gem_object *obj); 1023void i915_gem_release_mmap(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 12dae003c011..47c665eeaf17 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -59,8 +59,7 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
59 bool interruptible); 59 bool interruptible);
60static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, 60static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
61 unsigned alignment, 61 unsigned alignment,
62 bool mappable, 62 bool map_and_fenceable);
63 bool need_fence);
64static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); 63static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
65static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, 64static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
66 struct drm_i915_gem_pwrite *args, 65 struct drm_i915_gem_pwrite *args,
@@ -1074,7 +1073,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1074 else if (obj_priv->tiling_mode == I915_TILING_NONE && 1073 else if (obj_priv->tiling_mode == I915_TILING_NONE &&
1075 obj_priv->gtt_space && 1074 obj_priv->gtt_space &&
1076 obj->write_domain != I915_GEM_DOMAIN_CPU) { 1075 obj->write_domain != I915_GEM_DOMAIN_CPU) {
1077 ret = i915_gem_object_pin(obj, 0, true, false); 1076 ret = i915_gem_object_pin(obj, 0, true);
1078 if (ret) 1077 if (ret)
1079 goto out; 1078 goto out;
1080 1079
@@ -1300,8 +1299,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1300 BUG_ON(obj_priv->pin_count && !obj_priv->pin_mappable); 1299 BUG_ON(obj_priv->pin_count && !obj_priv->pin_mappable);
1301 1300
1302 if (obj_priv->gtt_space) { 1301 if (obj_priv->gtt_space) {
1303 if (!obj_priv->mappable || 1302 if (!obj_priv->map_and_fenceable) {
1304 (obj_priv->tiling_mode && !obj_priv->fenceable)) {
1305 ret = i915_gem_object_unbind(obj); 1303 ret = i915_gem_object_unbind(obj);
1306 if (ret) 1304 if (ret)
1307 goto unlock; 1305 goto unlock;
@@ -1309,8 +1307,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1309 } 1307 }
1310 1308
1311 if (!obj_priv->gtt_space) { 1309 if (!obj_priv->gtt_space) {
1312 ret = i915_gem_object_bind_to_gtt(obj, 0, 1310 ret = i915_gem_object_bind_to_gtt(obj, 0, true);
1313 true, obj_priv->tiling_mode);
1314 if (ret) 1311 if (ret)
1315 goto unlock; 1312 goto unlock;
1316 } 1313 }
@@ -2273,8 +2270,8 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
2273 2270
2274 i915_gem_info_remove_gtt(dev_priv, obj_priv); 2271 i915_gem_info_remove_gtt(dev_priv, obj_priv);
2275 list_del_init(&obj_priv->mm_list); 2272 list_del_init(&obj_priv->mm_list);
2276 obj_priv->fenceable = true; 2273 /* Avoid an unnecessary call to unbind on rebind. */
2277 obj_priv->mappable = true; 2274 obj_priv->map_and_fenceable = true;
2278 2275
2279 drm_mm_put_block(obj_priv->gtt_space); 2276 drm_mm_put_block(obj_priv->gtt_space);
2280 obj_priv->gtt_space = NULL; 2277 obj_priv->gtt_space = NULL;
@@ -2383,7 +2380,7 @@ static void i915_write_fence_reg(struct drm_gem_object *obj)
2383 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || 2380 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
2384 (obj_priv->gtt_offset & (size - 1))) { 2381 (obj_priv->gtt_offset & (size - 1))) {
2385 WARN(1, "%s: object 0x%08x [fenceable? %d] not 1M or size (0x%08x) aligned [gtt_space offset=%lx, size=%lx]\n", 2382 WARN(1, "%s: object 0x%08x [fenceable? %d] not 1M or size (0x%08x) aligned [gtt_space offset=%lx, size=%lx]\n",
2386 __func__, obj_priv->gtt_offset, obj_priv->fenceable, size, 2383 __func__, obj_priv->gtt_offset, obj_priv->map_and_fenceable, size,
2387 obj_priv->gtt_space->start, obj_priv->gtt_space->size); 2384 obj_priv->gtt_space->start, obj_priv->gtt_space->size);
2388 return; 2385 return;
2389 } 2386 }
@@ -2687,8 +2684,7 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
2687static int 2684static int
2688i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, 2685i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
2689 unsigned alignment, 2686 unsigned alignment,
2690 bool mappable, 2687 bool map_and_fenceable)
2691 bool need_fence)
2692{ 2688{
2693 struct drm_device *dev = obj->dev; 2689 struct drm_device *dev = obj->dev;
2694 drm_i915_private_t *dev_priv = dev->dev_private; 2690 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2696,6 +2692,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
2696 struct drm_mm_node *free_space; 2692 struct drm_mm_node *free_space;
2697 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; 2693 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2698 u32 size, fence_size, fence_alignment; 2694 u32 size, fence_size, fence_alignment;
2695 bool mappable, fenceable;
2699 int ret; 2696 int ret;
2700 2697
2701 if (obj_priv->madv != I915_MADV_WILLNEED) { 2698 if (obj_priv->madv != I915_MADV_WILLNEED) {
@@ -2707,25 +2704,25 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
2707 fence_alignment = i915_gem_get_gtt_alignment(obj_priv); 2704 fence_alignment = i915_gem_get_gtt_alignment(obj_priv);
2708 2705
2709 if (alignment == 0) 2706 if (alignment == 0)
2710 alignment = need_fence ? fence_alignment : 4096; 2707 alignment = map_and_fenceable ? fence_alignment : 4096;
2711 if (need_fence && alignment & (fence_alignment - 1)) { 2708 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
2712 DRM_ERROR("Invalid object alignment requested %u\n", alignment); 2709 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2713 return -EINVAL; 2710 return -EINVAL;
2714 } 2711 }
2715 2712
2716 size = need_fence ? fence_size : obj->size; 2713 size = map_and_fenceable ? fence_size : obj->size;
2717 2714
2718 /* If the object is bigger than the entire aperture, reject it early 2715 /* If the object is bigger than the entire aperture, reject it early
2719 * before evicting everything in a vain attempt to find space. 2716 * before evicting everything in a vain attempt to find space.
2720 */ 2717 */
2721 if (obj->size > 2718 if (obj->size >
2722 (mappable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) { 2719 (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
2723 DRM_ERROR("Attempting to bind an object larger than the aperture\n"); 2720 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2724 return -E2BIG; 2721 return -E2BIG;
2725 } 2722 }
2726 2723
2727 search_free: 2724 search_free:
2728 if (mappable) 2725 if (map_and_fenceable)
2729 free_space = 2726 free_space =
2730 drm_mm_search_free_in_range(&dev_priv->mm.gtt_space, 2727 drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
2731 size, alignment, 0, 2728 size, alignment, 0,
@@ -2736,7 +2733,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
2736 size, alignment, 0); 2733 size, alignment, 0);
2737 2734
2738 if (free_space != NULL) { 2735 if (free_space != NULL) {
2739 if (mappable) 2736 if (map_and_fenceable)
2740 obj_priv->gtt_space = 2737 obj_priv->gtt_space =
2741 drm_mm_get_block_range_generic(free_space, 2738 drm_mm_get_block_range_generic(free_space,
2742 size, alignment, 0, 2739 size, alignment, 0,
@@ -2750,7 +2747,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
2750 /* If the gtt is empty and we're still having trouble 2747 /* If the gtt is empty and we're still having trouble
2751 * fitting our object in, we're out of memory. 2748 * fitting our object in, we're out of memory.
2752 */ 2749 */
2753 ret = i915_gem_evict_something(dev, size, alignment, mappable); 2750 ret = i915_gem_evict_something(dev, size, alignment,
2751 map_and_fenceable);
2754 if (ret) 2752 if (ret)
2755 return ret; 2753 return ret;
2756 2754
@@ -2765,7 +2763,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
2765 if (ret == -ENOMEM) { 2763 if (ret == -ENOMEM) {
2766 /* first try to clear up some space from the GTT */ 2764 /* first try to clear up some space from the GTT */
2767 ret = i915_gem_evict_something(dev, size, 2765 ret = i915_gem_evict_something(dev, size,
2768 alignment, mappable); 2766 alignment,
2767 map_and_fenceable);
2769 if (ret) { 2768 if (ret) {
2770 /* now try to shrink everyone else */ 2769 /* now try to shrink everyone else */
2771 if (gfpmask) { 2770 if (gfpmask) {
@@ -2796,7 +2795,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
2796 obj_priv->gtt_space = NULL; 2795 obj_priv->gtt_space = NULL;
2797 2796
2798 ret = i915_gem_evict_something(dev, size, 2797 ret = i915_gem_evict_something(dev, size,
2799 alignment, mappable); 2798 alignment, map_and_fenceable);
2800 if (ret) 2799 if (ret)
2801 return ret; 2800 return ret;
2802 2801
@@ -2816,15 +2815,17 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
2816 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); 2815 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2817 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); 2816 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
2818 2817
2819 trace_i915_gem_object_bind(obj, obj_priv->gtt_offset, mappable); 2818 trace_i915_gem_object_bind(obj, obj_priv->gtt_offset, map_and_fenceable);
2820 2819
2821 obj_priv->fenceable = 2820 fenceable =
2822 obj_priv->gtt_space->size == fence_size && 2821 obj_priv->gtt_space->size == fence_size &&
2823 (obj_priv->gtt_space->start & (fence_alignment -1)) == 0; 2822 (obj_priv->gtt_space->start & (fence_alignment -1)) == 0;
2824 2823
2825 obj_priv->mappable = 2824 mappable =
2826 obj_priv->gtt_offset + obj->size <= dev_priv->mm.gtt_mappable_end; 2825 obj_priv->gtt_offset + obj->size <= dev_priv->mm.gtt_mappable_end;
2827 2826
2827 obj_priv->map_and_fenceable = mappable && fenceable;
2828
2828 return 0; 2829 return 0;
2829} 2830}
2830 2831
@@ -3538,8 +3539,7 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
3538 entry->relocation_count ? true : need_fence; 3539 entry->relocation_count ? true : need_fence;
3539 3540
3540 /* Check fence reg constraints and rebind if necessary */ 3541 /* Check fence reg constraints and rebind if necessary */
3541 if ((need_fence && !obj->fenceable) || 3542 if (need_mappable && !obj->map_and_fenceable) {
3542 (need_mappable && !obj->mappable)) {
3543 ret = i915_gem_object_unbind(&obj->base); 3543 ret = i915_gem_object_unbind(&obj->base);
3544 if (ret) 3544 if (ret)
3545 break; 3545 break;
@@ -3547,8 +3547,7 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
3547 3547
3548 ret = i915_gem_object_pin(&obj->base, 3548 ret = i915_gem_object_pin(&obj->base,
3549 entry->alignment, 3549 entry->alignment,
3550 need_mappable, 3550 need_mappable);
3551 need_fence);
3552 if (ret) 3551 if (ret)
3553 break; 3552 break;
3554 3553
@@ -4143,7 +4142,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
4143 4142
4144int 4143int
4145i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, 4144i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment,
4146 bool mappable, bool need_fence) 4145 bool map_and_fenceable)
4147{ 4146{
4148 struct drm_device *dev = obj->dev; 4147 struct drm_device *dev = obj->dev;
4149 struct drm_i915_private *dev_priv = dev->dev_private; 4148 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4151,19 +4150,19 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment,
4151 int ret; 4150 int ret;
4152 4151
4153 BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); 4152 BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
4154 BUG_ON(need_fence && !mappable); 4153 BUG_ON(map_and_fenceable && !map_and_fenceable);
4155 WARN_ON(i915_verify_lists(dev)); 4154 WARN_ON(i915_verify_lists(dev));
4156 4155
4157 if (obj_priv->gtt_space != NULL) { 4156 if (obj_priv->gtt_space != NULL) {
4158 if ((alignment && obj_priv->gtt_offset & (alignment - 1)) || 4157 if ((alignment && obj_priv->gtt_offset & (alignment - 1)) ||
4159 (need_fence && !obj_priv->fenceable) || 4158 (map_and_fenceable && !obj_priv->map_and_fenceable)) {
4160 (mappable && !obj_priv->mappable)) {
4161 WARN(obj_priv->pin_count, 4159 WARN(obj_priv->pin_count,
4162 "bo is already pinned with incorrect alignment:" 4160 "bo is already pinned with incorrect alignment:"
4163 " offset=%x, req.alignment=%x, need_fence=%d, fenceable=%d, mappable=%d, cpu_accessible=%d\n", 4161 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
4162 " obj->map_and_fenceable=%d\n",
4164 obj_priv->gtt_offset, alignment, 4163 obj_priv->gtt_offset, alignment,
4165 need_fence, obj_priv->fenceable, 4164 map_and_fenceable,
4166 mappable, obj_priv->mappable); 4165 obj_priv->map_and_fenceable);
4167 ret = i915_gem_object_unbind(obj); 4166 ret = i915_gem_object_unbind(obj);
4168 if (ret) 4167 if (ret)
4169 return ret; 4168 return ret;
@@ -4172,18 +4171,18 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment,
4172 4171
4173 if (obj_priv->gtt_space == NULL) { 4172 if (obj_priv->gtt_space == NULL) {
4174 ret = i915_gem_object_bind_to_gtt(obj, alignment, 4173 ret = i915_gem_object_bind_to_gtt(obj, alignment,
4175 mappable, need_fence); 4174 map_and_fenceable);
4176 if (ret) 4175 if (ret)
4177 return ret; 4176 return ret;
4178 } 4177 }
4179 4178
4180 if (obj_priv->pin_count++ == 0) { 4179 if (obj_priv->pin_count++ == 0) {
4181 i915_gem_info_add_pin(dev_priv, obj_priv, mappable); 4180 i915_gem_info_add_pin(dev_priv, obj_priv, map_and_fenceable);
4182 if (!obj_priv->active) 4181 if (!obj_priv->active)
4183 list_move_tail(&obj_priv->mm_list, 4182 list_move_tail(&obj_priv->mm_list,
4184 &dev_priv->mm.pinned_list); 4183 &dev_priv->mm.pinned_list);
4185 } 4184 }
4186 BUG_ON(!obj_priv->pin_mappable && mappable); 4185 BUG_ON(!obj_priv->pin_mappable && map_and_fenceable);
4187 4186
4188 WARN_ON(i915_verify_lists(dev)); 4187 WARN_ON(i915_verify_lists(dev));
4189 return 0; 4188 return 0;
@@ -4245,8 +4244,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4245 obj_priv->user_pin_count++; 4244 obj_priv->user_pin_count++;
4246 obj_priv->pin_filp = file_priv; 4245 obj_priv->pin_filp = file_priv;
4247 if (obj_priv->user_pin_count == 1) { 4246 if (obj_priv->user_pin_count == 1) {
4248 ret = i915_gem_object_pin(obj, args->alignment, 4247 ret = i915_gem_object_pin(obj, args->alignment, true);
4249 true, obj_priv->tiling_mode);
4250 if (ret) 4248 if (ret)
4251 goto out; 4249 goto out;
4252 } 4250 }
@@ -4439,8 +4437,8 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
4439 INIT_LIST_HEAD(&obj->ring_list); 4437 INIT_LIST_HEAD(&obj->ring_list);
4440 INIT_LIST_HEAD(&obj->gpu_write_list); 4438 INIT_LIST_HEAD(&obj->gpu_write_list);
4441 obj->madv = I915_MADV_WILLNEED; 4439 obj->madv = I915_MADV_WILLNEED;
4442 obj->fenceable = true; 4440 /* Avoid an unnecessary call to unbind on the first bind. */
4443 obj->mappable = true; 4441 obj->map_and_fenceable = true;
4444 4442
4445 return &obj->base; 4443 return &obj->base;
4446} 4444}
@@ -4560,7 +4558,7 @@ i915_gem_init_pipe_control(struct drm_device *dev)
4560 obj_priv = to_intel_bo(obj); 4558 obj_priv = to_intel_bo(obj);
4561 obj_priv->agp_type = AGP_USER_CACHED_MEMORY; 4559 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4562 4560
4563 ret = i915_gem_object_pin(obj, 4096, true, false); 4561 ret = i915_gem_object_pin(obj, 4096, true);
4564 if (ret) 4562 if (ret)
4565 goto err_unref; 4563 goto err_unref;
4566 4564
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a2cd579eb9b2..77b34942dc91 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1461,8 +1461,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
1461 BUG(); 1461 BUG();
1462 } 1462 }
1463 1463
1464 ret = i915_gem_object_pin(obj, alignment, true, 1464 ret = i915_gem_object_pin(obj, alignment, true);
1465 obj_priv->tiling_mode);
1466 if (ret) 1465 if (ret)
1467 return ret; 1466 return ret;
1468 1467
@@ -4367,7 +4366,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
4367 /* we only need to pin inside GTT if cursor is non-phy */ 4366 /* we only need to pin inside GTT if cursor is non-phy */
4368 mutex_lock(&dev->struct_mutex); 4367 mutex_lock(&dev->struct_mutex);
4369 if (!dev_priv->info->cursor_needs_physical) { 4368 if (!dev_priv->info->cursor_needs_physical) {
4370 ret = i915_gem_object_pin(bo, PAGE_SIZE, true, false); 4369 ret = i915_gem_object_pin(bo, PAGE_SIZE, true);
4371 if (ret) { 4370 if (ret) {
4372 DRM_ERROR("failed to pin cursor bo\n"); 4371 DRM_ERROR("failed to pin cursor bo\n");
4373 goto fail_locked; 4372 goto fail_locked;
@@ -5531,7 +5530,7 @@ intel_alloc_context_page(struct drm_device *dev)
5531 } 5530 }
5532 5531
5533 mutex_lock(&dev->struct_mutex); 5532 mutex_lock(&dev->struct_mutex);
5534 ret = i915_gem_object_pin(ctx, 4096, false, false); 5533 ret = i915_gem_object_pin(ctx, 4096, true);
5535 if (ret) { 5534 if (ret) {
5536 DRM_ERROR("failed to pin power context: %d\n", ret); 5535 DRM_ERROR("failed to pin power context: %d\n", ret);
5537 goto err_unref; 5536 goto err_unref;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 659f8349a15c..ec8ffaccbbdb 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -781,7 +781,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
781 if (ret != 0) 781 if (ret != 0)
782 return ret; 782 return ret;
783 783
784 ret = i915_gem_object_pin(new_bo, PAGE_SIZE, false, false); 784 ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true);
785 if (ret != 0) 785 if (ret != 0)
786 return ret; 786 return ret;
787 787
@@ -1425,7 +1425,7 @@ void intel_setup_overlay(struct drm_device *dev)
1425 } 1425 }
1426 overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr; 1426 overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
1427 } else { 1427 } else {
1428 ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false); 1428 ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
1429 if (ret) { 1429 if (ret) {
1430 DRM_ERROR("failed to pin overlay register bo\n"); 1430 DRM_ERROR("failed to pin overlay register bo\n");
1431 goto out_free_bo; 1431 goto out_free_bo;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 85071570e1f9..78a5061a58f6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -547,7 +547,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
547 obj_priv = to_intel_bo(obj); 547 obj_priv = to_intel_bo(obj);
548 obj_priv->agp_type = AGP_USER_CACHED_MEMORY; 548 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
549 549
550 ret = i915_gem_object_pin(obj, 4096, true, false); 550 ret = i915_gem_object_pin(obj, 4096, true);
551 if (ret != 0) { 551 if (ret != 0) {
552 goto err_unref; 552 goto err_unref;
553 } 553 }
@@ -602,7 +602,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
602 602
603 ring->gem_object = obj; 603 ring->gem_object = obj;
604 604
605 ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false); 605 ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
606 if (ret) 606 if (ret)
607 goto err_unref; 607 goto err_unref;
608 608
@@ -906,7 +906,7 @@ static int blt_ring_init(struct intel_ring_buffer *ring)
906 if (obj == NULL) 906 if (obj == NULL)
907 return -ENOMEM; 907 return -ENOMEM;
908 908
909 ret = i915_gem_object_pin(&obj->base, 4096, true, false); 909 ret = i915_gem_object_pin(&obj->base, 4096, true);
910 if (ret) { 910 if (ret) {
911 drm_gem_object_unreference(&obj->base); 911 drm_gem_object_unreference(&obj->base);
912 return ret; 912 return ret;