author    Daniel Vetter <daniel.vetter@ffwll.ch>	2014-02-14 08:01:11 -0500
committer Daniel Vetter <daniel.vetter@ffwll.ch>	2014-02-14 08:16:58 -0500
commit    1ec9e26ddab06459e89a890431b2de064c5d1056 (patch)
tree      6ea1f8cfcffa2acf9ffec2fc12b197cc65e1d978
parent    931c1c26983b4f84e33b78579fc8d57e4a14c6b4 (diff)
drm/i915: Consolidate binding parameters into flags
Anything more than just one bool parameter is a pain to read; symbolic
constants are much better.

Split out from Chris' vma-binding rework patch.

v2: Undo the behaviour change in object_pin that Chris spotted.

v3: Split out misplaced hunk to handle set_cache_level errors, spotted
by Jani.

v4: Keep the current over-zealous binding logic in the execbuffer code
working with a quick hack while the overall binding code gets shuffled
around.

v5: Reorder the PIN_ flags for more natural patch splitup.

v6: Pull out the PIN_GLOBAL split-up again.

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ben Widawsky <benjamin.widawsky@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
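[Editor's sketch] The patch applies the common kernel idiom of folding
boolean parameters into a single flags bitmask. Below is a minimal,
self-contained illustration of that idiom; only the PIN_* values mirror
the patch, while demo_pin() and its call sites are hypothetical, not
driver code:

/* Demo only: PIN_* values mirror the patch; demo_pin() is hypothetical. */
#include <stdio.h>

#define PIN_MAPPABLE 0x1
#define PIN_NONBLOCK 0x2

/* One unsigned carries both bits; adding a new PIN_ flag later needs
 * no further signature change, unlike adding another bool parameter.
 */
static int demo_pin(const char *site, unsigned flags)
{
	printf("%s: mappable=%d nonblock=%d\n", site,
	       !!(flags & PIN_MAPPABLE), !!(flags & PIN_NONBLOCK));
	return 0;
}

int main(void)
{
	demo_pin("gtt_pwrite_fast", PIN_MAPPABLE | PIN_NONBLOCK); /* was (true, true) */
	demo_pin("gem_fault", PIN_MAPPABLE);                      /* was (true, false) */
	demo_pin("context_pin", 0);                               /* was (false, false) */
	return 0;
}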
 drivers/gpu/drm/i915/i915_drv.h            | 14
 drivers/gpu/drm/i915/i915_gem.c            | 62
 drivers/gpu/drm/i915/i915_gem_context.c    |  9
 drivers/gpu/drm/i915/i915_gem_evict.c      | 10
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 19
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  2
 drivers/gpu/drm/i915/i915_trace.h          | 20
 drivers/gpu/drm/i915/intel_overlay.c       |  2
 drivers/gpu/drm/i915/intel_pm.c            |  2
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 11
 10 files changed, 70 insertions(+), 81 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d33199452c11..8a6db27dd966 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2076,11 +2076,12 @@ void i915_init_vm(struct drm_i915_private *dev_priv,
 void i915_gem_free_object(struct drm_gem_object *obj);
 void i915_gem_vma_destroy(struct i915_vma *vma);
 
+#define PIN_MAPPABLE 0x1
+#define PIN_NONBLOCK 0x2
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
 				     struct i915_address_space *vm,
 				     uint32_t alignment,
-				     bool map_and_fenceable,
-				     bool nonblocking);
+				     unsigned flags);
 void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
@@ -2283,11 +2284,9 @@ i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
 static inline int __must_check
 i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
 		      uint32_t alignment,
-		      bool map_and_fenceable,
-		      bool nonblocking)
+		      unsigned flags)
 {
-	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
-				   map_and_fenceable, nonblocking);
+	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags);
 }
 
 /* i915_gem_context.c */
@@ -2331,8 +2330,7 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
 					  int min_size,
 					  unsigned alignment,
 					  unsigned cache_level,
-					  bool mappable,
-					  bool nonblock);
+					  unsigned flags);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 int i915_gem_evict_everything(struct drm_device *dev);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dee560267b1d..aa263e371ebc 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,12 +43,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
 static __must_check int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 			       bool readonly);
-static __must_check int
-i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
-			   struct i915_address_space *vm,
-			   unsigned alignment,
-			   bool map_and_fenceable,
-			   bool nonblocking);
 static int i915_gem_phys_pwrite(struct drm_device *dev,
 				struct drm_i915_gem_object *obj,
 				struct drm_i915_gem_pwrite *args,
@@ -605,7 +599,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 	char __user *user_data;
 	int page_offset, page_length, ret;
 
-	ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
+	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
 	if (ret)
 		goto out;
 
@@ -1411,7 +1405,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	/* Now bind it into the GTT if needed */
-	ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
+	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
 	if (ret)
 		goto unlock;
 
@@ -2721,7 +2715,6 @@ int i915_vma_unbind(struct i915_vma *vma)
 
 	if (!drm_mm_node_allocated(&vma->node)) {
 		i915_gem_vma_destroy(vma);
-
 		return 0;
 	}
 
@@ -3219,14 +3212,13 @@ static int
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 			   struct i915_address_space *vm,
 			   unsigned alignment,
-			   bool map_and_fenceable,
-			   bool nonblocking)
+			   unsigned flags)
 {
 	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
 	size_t gtt_max =
-		map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
+		flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
 	struct i915_vma *vma;
 	int ret;
 
@@ -3238,18 +3230,18 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 					   obj->tiling_mode, true);
 	unfenced_alignment =
 		i915_gem_get_gtt_alignment(dev,
 					   obj->base.size,
 					   obj->tiling_mode, false);
 
 	if (alignment == 0)
-		alignment = map_and_fenceable ? fence_alignment :
+		alignment = flags & PIN_MAPPABLE ? fence_alignment :
 						unfenced_alignment;
-	if (map_and_fenceable && alignment & (fence_alignment - 1)) {
+	if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
 		DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
 		return -EINVAL;
 	}
 
-	size = map_and_fenceable ? fence_size : obj->base.size;
+	size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
 
 	/* If the object is bigger than the entire aperture, reject it early
 	 * before evicting everything in a vain attempt to find space.
@@ -3257,7 +3249,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	if (obj->base.size > gtt_max) {
 		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
 			  obj->base.size,
-			  map_and_fenceable ? "mappable" : "total",
+			  flags & PIN_MAPPABLE ? "mappable" : "total",
 			  gtt_max);
 		return -E2BIG;
 	}
@@ -3281,9 +3273,7 @@ search_free:
 					  DRM_MM_SEARCH_DEFAULT);
 	if (ret) {
 		ret = i915_gem_evict_something(dev, vm, size, alignment,
-					       obj->cache_level,
-					       map_and_fenceable,
-					       nonblocking);
+					       obj->cache_level, flags);
 		if (ret == 0)
 			goto search_free;
 
@@ -3314,9 +3304,9 @@ search_free:
 		obj->map_and_fenceable = mappable && fenceable;
 	}
 
-	WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
+	WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
 
-	trace_i915_vma_bind(vma, map_and_fenceable);
+	trace_i915_vma_bind(vma, flags);
 	i915_gem_verify_gtt(dev);
 	return 0;
 
@@ -3687,7 +3677,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	 * (e.g. libkms for the bootup splash), we have to ensure that we
 	 * always use map_and_fenceable for all scanout buffers.
 	 */
-	ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
+	ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
 	if (ret)
 		goto err_unpin_display;
 
@@ -3843,30 +3833,28 @@ int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    struct i915_address_space *vm,
 		    uint32_t alignment,
-		    bool map_and_fenceable,
-		    bool nonblocking)
+		    unsigned flags)
 {
-	const u32 flags = map_and_fenceable ? GLOBAL_BIND : 0;
 	struct i915_vma *vma;
 	int ret;
 
-	WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
+	if (WARN_ON(flags & PIN_MAPPABLE && !i915_is_ggtt(vm)))
+		return -EINVAL;
 
 	vma = i915_gem_obj_to_vma(obj, vm);
-
 	if (vma) {
 		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
 			return -EBUSY;
 
 		if ((alignment &&
 		     vma->node.start & (alignment - 1)) ||
-		    (map_and_fenceable && !obj->map_and_fenceable)) {
+		    (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
 			WARN(vma->pin_count,
 			     "bo is already pinned with incorrect alignment:"
 			     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
 			     " obj->map_and_fenceable=%d\n",
 			     i915_gem_obj_offset(obj, vm), alignment,
-			     map_and_fenceable,
+			     flags & PIN_MAPPABLE,
 			     obj->map_and_fenceable);
 			ret = i915_vma_unbind(vma);
 			if (ret)
@@ -3875,9 +3863,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	}
 
 	if (!i915_gem_obj_bound(obj, vm)) {
-		ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
-						 map_and_fenceable,
-						 nonblocking);
+		ret = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
 		if (ret)
 			return ret;
 
@@ -3885,10 +3871,12 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 
 	vma = i915_gem_obj_to_vma(obj, vm);
 
-	vma->bind_vma(vma, obj->cache_level, flags);
+	vma->bind_vma(vma, obj->cache_level,
+		      flags & PIN_MAPPABLE ? GLOBAL_BIND : 0);
 
 	i915_gem_obj_to_vma(obj, vm)->pin_count++;
-	obj->pin_mappable |= map_and_fenceable;
+	if (flags & PIN_MAPPABLE)
+		obj->pin_mappable |= true;
 
 	return 0;
 }
@@ -3946,7 +3934,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	}
 
 	if (obj->user_pin_count == 0) {
-		ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
+		ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
 		if (ret)
 			goto out;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 19fd3629795c..f8c21a6dd663 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -258,8 +258,7 @@ i915_gem_create_context(struct drm_device *dev,
 	 * context.
 	 */
 	ret = i915_gem_obj_ggtt_pin(ctx->obj,
-				    get_context_alignment(dev),
-				    false, false);
+				    get_context_alignment(dev), 0);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
 		goto err_destroy;
@@ -335,8 +334,7 @@ void i915_gem_context_reset(struct drm_device *dev)
 
 		if (i == RCS) {
 			WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
-						      get_context_alignment(dev),
-						      false, false));
+						      get_context_alignment(dev), 0));
 			/* Fake a finish/inactive */
 			dctx->obj->base.write_domain = 0;
 			dctx->obj->active = 0;
@@ -612,8 +610,7 @@ static int do_switch(struct intel_ring_buffer *ring,
 	/* Trying to pin first makes error handling easier. */
 	if (ring == &dev_priv->ring[RCS]) {
 		ret = i915_gem_obj_ggtt_pin(to->obj,
-					    get_context_alignment(ring->dev),
-					    false, false);
+					    get_context_alignment(ring->dev), 0);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 5168d6a08054..8a78f7885cba 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -68,7 +68,7 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
 int
 i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 			 int min_size, unsigned alignment, unsigned cache_level,
-			 bool mappable, bool nonblocking)
+			 unsigned flags)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct list_head eviction_list, unwind_list;
@@ -76,7 +76,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	int ret = 0;
 	int pass = 0;
 
-	trace_i915_gem_evict(dev, min_size, alignment, mappable);
+	trace_i915_gem_evict(dev, min_size, alignment, flags);
 
 	/*
 	 * The goal is to evict objects and amalgamate space in LRU order.
@@ -102,7 +102,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	 */
 
 	INIT_LIST_HEAD(&unwind_list);
-	if (mappable) {
+	if (flags & PIN_MAPPABLE) {
 		BUG_ON(!i915_is_ggtt(vm));
 		drm_mm_init_scan_with_range(&vm->mm, min_size,
 					    alignment, cache_level, 0,
@@ -117,7 +117,7 @@ search_again:
 		goto found;
 	}
 
-	if (nonblocking)
+	if (flags & PIN_NONBLOCK)
 		goto none;
 
 	/* Now merge in the soon-to-be-expired objects... */
@@ -141,7 +141,7 @@ none:
 	/* Can we unpin some objects such as idle hw contents,
 	 * or pending flips?
 	 */
-	if (nonblocking)
+	if (flags & PIN_NONBLOCK)
 		return -ENOSPC;
 
 	/* Only idle the GPU and repeat the search once */
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 032def901f98..013bd5ab3913 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -544,19 +544,23 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	struct drm_i915_gem_object *obj = vma->obj;
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
-	bool need_fence, need_mappable;
-	u32 flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
-		!vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
+	bool need_fence;
+	unsigned flags;
 	int ret;
 
+	flags = 0;
+
 	need_fence =
 		has_fenced_gpu_access &&
 		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 		obj->tiling_mode != I915_TILING_NONE;
-	need_mappable = need_fence || need_reloc_mappable(vma);
+	if (need_fence || need_reloc_mappable(vma))
+		flags |= PIN_MAPPABLE;
 
-	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
-				  false);
+	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
+		flags |= PIN_MAPPABLE;
+
+	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
 	if (ret)
 		return ret;
 
@@ -585,6 +589,9 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
 	}
 
+	/* Temporary hack while we rework the binding logic. */
+	flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
+		!vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
 	vma->bind_vma(vma, obj->cache_level, flags);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index ee38fafc6917..1dcd50541ae2 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -885,7 +885,7 @@ alloc:
 	if (ret == -ENOSPC && !retried) {
 		ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
 					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
-					       I915_CACHE_NONE, false, true);
+					       I915_CACHE_NONE, PIN_NONBLOCK);
 		if (ret)
 			return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 6e580c98dede..b95a380958db 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -34,15 +34,15 @@ TRACE_EVENT(i915_gem_object_create,
 );
 
 TRACE_EVENT(i915_vma_bind,
-	    TP_PROTO(struct i915_vma *vma, bool mappable),
-	    TP_ARGS(vma, mappable),
+	    TP_PROTO(struct i915_vma *vma, unsigned flags),
+	    TP_ARGS(vma, flags),
 
 	    TP_STRUCT__entry(
 			     __field(struct drm_i915_gem_object *, obj)
 			     __field(struct i915_address_space *, vm)
 			     __field(u32, offset)
 			     __field(u32, size)
-			     __field(bool, mappable)
+			     __field(unsigned, flags)
 			     ),
 
 	    TP_fast_assign(
@@ -50,12 +50,12 @@ TRACE_EVENT(i915_vma_bind,
 			   __entry->vm = vma->vm;
 			   __entry->offset = vma->node.start;
 			   __entry->size = vma->node.size;
-			   __entry->mappable = mappable;
+			   __entry->flags = flags;
 			   ),
 
 	    TP_printk("obj=%p, offset=%08x size=%x%s vm=%p",
 		      __entry->obj, __entry->offset, __entry->size,
-		      __entry->mappable ? ", mappable" : "",
+		      __entry->flags & PIN_MAPPABLE ? ", mappable" : "",
 		      __entry->vm)
 );
 
@@ -196,26 +196,26 @@ DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
 );
 
 TRACE_EVENT(i915_gem_evict,
-	    TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable),
-	    TP_ARGS(dev, size, align, mappable),
+	    TP_PROTO(struct drm_device *dev, u32 size, u32 align, unsigned flags),
+	    TP_ARGS(dev, size, align, flags),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
 			     __field(u32, size)
 			     __field(u32, align)
-			     __field(bool, mappable)
+			     __field(unsigned, flags)
 			     ),
 
 	    TP_fast_assign(
 			   __entry->dev = dev->primary->index;
 			   __entry->size = size;
 			   __entry->align = align;
-			   __entry->mappable = mappable;
+			   __entry->flags = flags;
 			   ),
 
 	    TP_printk("dev=%d, size=%d, align=%d %s",
 		      __entry->dev, __entry->size, __entry->align,
-		      __entry->mappable ? ", mappable" : "")
+		      __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
 );
 
 TRACE_EVENT(i915_gem_evict_everything,
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 424f0946d8c4..ac519cb46f22 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1349,7 +1349,7 @@ void intel_setup_overlay(struct drm_device *dev)
 		}
 		overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
 	} else {
-		ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, true, false);
+		ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
 		if (ret) {
 			DRM_ERROR("failed to pin overlay register bo\n");
 			goto out_free_bo;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index e4a0c9cc226d..136647037244 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2741,7 +2741,7 @@ intel_alloc_context_page(struct drm_device *dev)
 		return NULL;
 	}
 
-	ret = i915_gem_obj_ggtt_pin(ctx, 4096, true, false);
+	ret = i915_gem_obj_ggtt_pin(ctx, 4096, PIN_MAPPABLE);
 	if (ret) {
 		DRM_ERROR("failed to pin power context: %d\n", ret);
 		goto err_unref;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ae6d234b8c12..f256d5fe46f8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -533,7 +533,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
 
 	i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
 
-	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
+	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
 	if (ret)
 		goto err_unref;
 
@@ -1273,10 +1273,9 @@ static int init_status_page(struct intel_ring_buffer *ring)
 
 	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
-	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
-	if (ret != 0) {
+	ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_MAPPABLE);
+	if (ret)
 		goto err_unref;
-	}
 
 	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
 	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
@@ -1356,7 +1355,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 
 	ring->obj = obj;
 
-	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
+	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
 	if (ret)
 		goto err_unref;
 
@@ -1919,7 +1918,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		return -ENOMEM;
 	}
 
-	ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
+	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
 	if (ret != 0) {
 		drm_gem_object_unreference(&obj->base);
 		DRM_ERROR("Failed to ping batch bo\n");