Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  293
1 file changed, 224 insertions(+), 69 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9e35dafc5807..4200c32407ec 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -176,7 +176,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 
         pinned = 0;
         mutex_lock(&dev->struct_mutex);
-        list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
                 if (obj->pin_count)
                         pinned += obj->gtt_space->size;
         mutex_unlock(&dev->struct_mutex);
@@ -956,7 +956,7 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
 
         ret = 0;
         if (seqno == ring->outstanding_lazy_request)
-                ret = i915_add_request(ring, NULL, NULL);
+                ret = i915_add_request(ring, NULL);
 
         return ret;
 }
@@ -1087,6 +1087,25 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
                             interruptible, NULL);
 }
 
+static int
+i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
+                                     struct intel_ring_buffer *ring)
+{
+        i915_gem_retire_requests_ring(ring);
+
+        /* Manually manage the write flush as we may have not yet
+         * retired the buffer.
+         *
+         * Note that the last_write_seqno is always the earlier of
+         * the two (read/write) seqno, so if we have successfully waited,
+         * we know we have passed the last write.
+         */
+        obj->last_write_seqno = 0;
+        obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+
+        return 0;
+}
+
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
@@ -1107,18 +1126,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
         if (ret)
                 return ret;
 
-        i915_gem_retire_requests_ring(ring);
-
-        /* Manually manage the write flush as we may have not yet
-         * retired the buffer.
-         */
-        if (obj->last_write_seqno &&
-            i915_seqno_passed(seqno, obj->last_write_seqno)) {
-                obj->last_write_seqno = 0;
-                obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
-        }
-
-        return 0;
+        return i915_gem_object_wait_rendering__tail(obj, ring);
 }
 
 /* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -1154,19 +1162,10 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
         mutex_unlock(&dev->struct_mutex);
         ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
         mutex_lock(&dev->struct_mutex);
+        if (ret)
+                return ret;
 
-        i915_gem_retire_requests_ring(ring);
-
-        /* Manually manage the write flush as we may have not yet
-         * retired the buffer.
-         */
-        if (obj->last_write_seqno &&
-            i915_seqno_passed(seqno, obj->last_write_seqno)) {
-                obj->last_write_seqno = 0;
-                obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
-        }
-
-        return ret;
+        return i915_gem_object_wait_rendering__tail(obj, ring);
 }
 
 /**
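The two wait paths above now share i915_gem_object_wait_rendering__tail(), which may clear last_write_seqno unconditionally: the write seqno is never later than the read seqno that was just waited upon, and seqno comparisons in this driver are wraparound-safe because they are taken as a signed 32-bit difference (mirroring i915_seqno_passed()). A minimal standalone sketch of that comparison, with made-up values:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Wraparound-safe "seq1 is at or after seq2" test: the signed 32-bit
 * difference stays non-negative as long as the two values are less than
 * 2^31 apart, even across a u32 wrap. */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
        /* Ordinary case: 100 has passed 90. */
        assert(seqno_passed(100, 90));
        /* Wrapped case: 5 is "after" 0xfffffff0 despite being smaller. */
        assert(seqno_passed(5, 0xfffffff0u));
        /* The earlier write seqno has not passed the later read seqno. */
        assert(!seqno_passed(90, 100));
        return 0;
}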
@@ -1676,7 +1675,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
         /* ->put_pages might need to allocate memory for the bit17 swizzle
          * array, hence protect them from being reaped by removing them from gtt
          * lists early. */
-        list_del(&obj->gtt_list);
+        list_del(&obj->global_list);
 
         ops->put_pages(obj);
         obj->pages = NULL;
@@ -1696,7 +1695,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
 
         list_for_each_entry_safe(obj, next,
                                  &dev_priv->mm.unbound_list,
-                                 gtt_list) {
+                                 global_list) {
                 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
                     i915_gem_object_put_pages(obj) == 0) {
                         count += obj->base.size >> PAGE_SHIFT;
@@ -1733,7 +1732,8 @@ i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 
         i915_gem_evict_everything(dev_priv->dev);
 
-        list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
+        list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
+                                 global_list)
                 i915_gem_object_put_pages(obj);
 }
 
@@ -1867,7 +1867,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
         if (ret)
                 return ret;
 
-        list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+        list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
         return 0;
 }
 
@@ -2005,17 +2005,18 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
         return 0;
 }
 
-int
-i915_add_request(struct intel_ring_buffer *ring,
-                 struct drm_file *file,
-                 u32 *out_seqno)
+int __i915_add_request(struct intel_ring_buffer *ring,
+                       struct drm_file *file,
+                       struct drm_i915_gem_object *obj,
+                       u32 *out_seqno)
 {
         drm_i915_private_t *dev_priv = ring->dev->dev_private;
         struct drm_i915_gem_request *request;
-        u32 request_ring_position;
+        u32 request_ring_position, request_start;
         int was_empty;
         int ret;
 
+        request_start = intel_ring_get_tail(ring);
         /*
          * Emit any outstanding flushes - execbuf can fail to emit the flush
          * after having emitted the batchbuffer command. Hence we need to fix
@@ -2047,7 +2048,21 @@ i915_add_request(struct intel_ring_buffer *ring,
 
         request->seqno = intel_ring_get_seqno(ring);
         request->ring = ring;
+        request->head = request_start;
         request->tail = request_ring_position;
+        request->ctx = ring->last_context;
+        request->batch_obj = obj;
+
+        /* Whilst this request exists, batch_obj will be on the
+         * active_list, and so will hold the active reference. Only when this
+         * request is retired will the batch_obj be moved onto the
+         * inactive_list and lose its active reference. Hence we do not need
+         * to explicitly hold another reference here.
+         */
+
+        if (request->ctx)
+                i915_gem_context_reference(request->ctx);
+
         request->emitted_jiffies = jiffies;
         was_empty = list_empty(&ring->request_list);
         list_add_tail(&request->list, &ring->request_list);
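__i915_add_request() now records where a request's commands start and end in the ring (request->head and request->tail), along with the submitting context and the batch object, so that a later GPU hang can be attributed to a specific request. A toy model of that range bookkeeping, with an illustrative ring size and names that are not the driver's:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4096u /* illustrative; real rings are larger */

struct fake_ring { uint32_t tail; };
struct fake_request { uint32_t head, tail; };

/* Emit 'bytes' of commands and record the half-open range they occupy,
 * loosely modelled on how __i915_add_request() snapshots the ring tail. */
static void emit_request(struct fake_ring *ring, struct fake_request *req,
                         uint32_t bytes)
{
        req->head = ring->tail;                     /* request_start */
        ring->tail = (ring->tail + bytes) % RING_SIZE;
        req->tail = ring->tail;                     /* request_ring_position */
}

int main(void)
{
        struct fake_ring ring = { .tail = RING_SIZE - 64 };
        struct fake_request req;

        emit_request(&ring, &req, 128); /* wraps past the end of the ring */
        printf("request occupies [0x%" PRIx32 ", 0x%" PRIx32 ")\n",
               req.head, req.tail);
        return 0;
}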
@@ -2100,9 +2115,114 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
         spin_unlock(&file_priv->mm.lock);
 }
 
+static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
+{
+        if (acthd >= obj->gtt_offset &&
+            acthd < obj->gtt_offset + obj->base.size)
+                return true;
+
+        return false;
+}
+
+static bool i915_head_inside_request(const u32 acthd_unmasked,
+                                     const u32 request_start,
+                                     const u32 request_end)
+{
+        const u32 acthd = acthd_unmasked & HEAD_ADDR;
+
+        if (request_start < request_end) {
+                if (acthd >= request_start && acthd < request_end)
+                        return true;
+        } else if (request_start > request_end) {
+                if (acthd >= request_start || acthd < request_end)
+                        return true;
+        }
+
+        return false;
+}
+
+static bool i915_request_guilty(struct drm_i915_gem_request *request,
+                                const u32 acthd, bool *inside)
+{
+        /* There is a possibility that the unmasked head address, pointing
+         * inside the ring, matches the batch_obj address range.
+         * However this is extremely unlikely.
+         */
+
+        if (request->batch_obj) {
+                if (i915_head_inside_object(acthd, request->batch_obj)) {
+                        *inside = true;
+                        return true;
+                }
+        }
+
+        if (i915_head_inside_request(acthd, request->head, request->tail)) {
+                *inside = false;
+                return true;
+        }
+
+        return false;
+}
+
+static void i915_set_reset_status(struct intel_ring_buffer *ring,
+                                  struct drm_i915_gem_request *request,
+                                  u32 acthd)
+{
+        struct i915_ctx_hang_stats *hs = NULL;
+        bool inside, guilty;
+
+        /* Innocent until proven guilty */
+        guilty = false;
+
+        if (ring->hangcheck.action != wait &&
+            i915_request_guilty(request, acthd, &inside)) {
+                DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n",
+                          ring->name,
+                          inside ? "inside" : "flushing",
+                          request->batch_obj ?
+                          request->batch_obj->gtt_offset : 0,
+                          request->ctx ? request->ctx->id : 0,
+                          acthd);
+
+                guilty = true;
+        }
+
+        /* If contexts are disabled or this is the default context, use
+         * file_priv->reset_state
+         */
+        if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
+                hs = &request->ctx->hang_stats;
+        else if (request->file_priv)
+                hs = &request->file_priv->hang_stats;
+
+        if (hs) {
+                if (guilty)
+                        hs->batch_active++;
+                else
+                        hs->batch_pending++;
+        }
+}
+
+static void i915_gem_free_request(struct drm_i915_gem_request *request)
+{
+        list_del(&request->list);
+        i915_gem_request_remove_from_client(request);
+
+        if (request->ctx)
+                i915_gem_context_unreference(request->ctx);
+
+        kfree(request);
+}
+
 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
                                       struct intel_ring_buffer *ring)
 {
+        u32 completed_seqno;
+        u32 acthd;
+
+        acthd = intel_ring_get_active_head(ring);
+        completed_seqno = ring->get_seqno(ring, false);
+
         while (!list_empty(&ring->request_list)) {
                 struct drm_i915_gem_request *request;
 
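i915_head_inside_request() above treats the ring as circular: a request whose commands wrapped past the end of the ring has start > end, and the masked ACTHD lies inside it when it falls in either straight segment; equal start and end matches nothing. A standalone sketch of the same membership test (function and variable names here are illustrative, not the driver's):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Half-open range membership on a circular ring buffer. */
static bool head_inside_range(uint32_t head, uint32_t start, uint32_t end)
{
        if (start < end)
                return head >= start && head < end;
        if (start > end)
                return head >= start || head < end;
        return false; /* start == end: empty request */
}

int main(void)
{
        assert(head_inside_range(0x100, 0x080, 0x200));  /* plain range */
        assert(head_inside_range(0x010, 0xff0, 0x040));  /* wrapped range */
        assert(!head_inside_range(0x800, 0xff0, 0x040)); /* outside the wrap */
        return 0;
}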
@@ -2110,9 +2230,10 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
                                            struct drm_i915_gem_request,
                                            list);
 
-                list_del(&request->list);
-                i915_gem_request_remove_from_client(request);
-                kfree(request);
+                if (request->seqno > completed_seqno)
+                        i915_set_reset_status(ring, request, acthd);
+
+                i915_gem_free_request(request);
         }
 
         while (!list_empty(&ring->active_list)) {
@@ -2193,9 +2314,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
                  */
                 ring->last_retired_head = request->tail;
 
-                list_del(&request->list);
-                i915_gem_request_remove_from_client(request);
-                kfree(request);
+                i915_gem_free_request(request);
         }
 
         /* Move any buffers on the active list that are no longer referenced
@@ -2262,7 +2381,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
         idle = true;
         for_each_ring(ring, dev_priv, i) {
                 if (ring->gpu_caches_dirty)
-                        i915_add_request(ring, NULL, NULL);
+                        i915_add_request(ring, NULL);
 
                 idle &= list_empty(&ring->request_list);
         }
@@ -2494,9 +2613,10 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
                 obj->has_aliasing_ppgtt_mapping = 0;
         }
         i915_gem_gtt_finish_object(obj);
+        i915_gem_object_unpin_pages(obj);
 
         list_del(&obj->mm_list);
-        list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+        list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
         /* Avoid an unnecessary call to unbind on rebind. */
         obj->map_and_fenceable = true;
 
@@ -2676,18 +2796,33 @@ static inline int fence_number(struct drm_i915_private *dev_priv,
         return fence - dev_priv->fence_regs;
 }
 
+struct write_fence {
+        struct drm_device *dev;
+        struct drm_i915_gem_object *obj;
+        int fence;
+};
+
 static void i915_gem_write_fence__ipi(void *data)
 {
+        struct write_fence *args = data;
+
+        /* Required for SNB+ with LLC */
         wbinvd();
+
+        /* Required for VLV */
+        i915_gem_write_fence(args->dev, args->fence, args->obj);
 }
 
 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
                                          struct drm_i915_fence_reg *fence,
                                          bool enable)
 {
-        struct drm_device *dev = obj->base.dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
-        int fence_reg = fence_number(dev_priv, fence);
+        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+        struct write_fence args = {
+                .dev = obj->base.dev,
+                .fence = fence_number(dev_priv, fence),
+                .obj = enable ? obj : NULL,
+        };
 
         /* In order to fully serialize access to the fenced region and
          * the update to the fence register we need to take extreme
@@ -2698,13 +2833,19 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
          * SNB+ we need to take a step further and emit an explicit wbinvd()
          * on each processor in order to manually flush all memory
          * transactions before updating the fence register.
+         *
+         * However, Valleyview complicates matters. There the wbinvd is
+         * insufficient and unlike SNB/IVB requires the serialising
+         * register write. (Note that that register write by itself is
+         * conversely not sufficient for SNB+.) To compromise, we do both.
          */
-        if (HAS_LLC(obj->base.dev))
-                on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
-        i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);
+        if (INTEL_INFO(args.dev)->gen >= 6)
+                on_each_cpu(i915_gem_write_fence__ipi, &args, 1);
+        else
+                i915_gem_write_fence(args.dev, args.fence, args.obj);
 
         if (enable) {
-                obj->fence_reg = fence_reg;
+                obj->fence_reg = args.fence;
                 fence->obj = obj;
                 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
         } else {
@@ -2883,7 +3024,7 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
         struct drm_i915_gem_object *obj;
         int err = 0;
 
-        list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+        list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
                 if (obj->gtt_space == NULL) {
                         printk(KERN_ERR "object found on GTT list with no space reserved\n");
                         err++;
@@ -2930,6 +3071,8 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
         struct drm_mm_node *node;
         u32 size, fence_size, fence_alignment, unfenced_alignment;
         bool mappable, fenceable;
+        size_t gtt_max = map_and_fenceable ?
+                dev_priv->gtt.mappable_end : dev_priv->gtt.total;
         int ret;
 
         fence_size = i915_gem_get_gtt_size(dev,
@@ -2956,9 +3099,11 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
         /* If the object is bigger than the entire aperture, reject it early
          * before evicting everything in a vain attempt to find space.
          */
-        if (obj->base.size >
-            (map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) {
-                DRM_ERROR("Attempting to bind an object larger than the aperture\n");
+        if (obj->base.size > gtt_max) {
+                DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
+                          obj->base.size,
+                          map_and_fenceable ? "mappable" : "total",
+                          gtt_max);
                 return -E2BIG;
         }
 
@@ -2974,14 +3119,10 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
                 return -ENOMEM;
         }
 
- search_free:
-        if (map_and_fenceable)
-                ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
-                                                          size, alignment, obj->cache_level,
-                                                          0, dev_priv->gtt.mappable_end);
-        else
-                ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
-                                                 size, alignment, obj->cache_level);
+search_free:
+        ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+                                                  size, alignment,
+                                                  obj->cache_level, 0, gtt_max);
         if (ret) {
                 ret = i915_gem_evict_something(dev, size, alignment,
                                                obj->cache_level,
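The two hunks above collapse the old mappable/non-mappable split: the bind path now computes a single upper bound, gtt_max, uses it for the early -E2BIG rejection, and then hands the same [0, gtt_max) range to drm_mm_insert_node_in_range_generic() in both cases. A small userspace sketch of just the bound selection and rejection (the struct and the sizes are invented for illustration):

#include <stddef.h>
#include <stdio.h>

struct fake_gtt { size_t mappable_end, total; };

/* Pick one allocation bound the way the reworked bind path does, and
 * reject oversized objects before any eviction is attempted. */
static int check_bind(const struct fake_gtt *gtt, size_t obj_size,
                      int map_and_fenceable, size_t *gtt_max)
{
        *gtt_max = map_and_fenceable ? gtt->mappable_end : gtt->total;
        if (obj_size > *gtt_max) {
                fprintf(stderr, "object=%zu > %s aperture=%zu\n", obj_size,
                        map_and_fenceable ? "mappable" : "total", *gtt_max);
                return -1; /* -E2BIG in the driver */
        }
        return 0; /* the allocator would then search [0, *gtt_max) */
}

int main(void)
{
        struct fake_gtt gtt = { .mappable_end = 256UL << 20,
                                .total = 2048UL << 20 };
        size_t gtt_max;

        check_bind(&gtt, 512UL << 20, 1, &gtt_max); /* too big for mappable */
        return check_bind(&gtt, 512UL << 20, 0, &gtt_max); /* fits in total */
}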
@@ -3007,7 +3148,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
                 return ret;
         }
 
-        list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
+        list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
         list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
         obj->gtt_space = node;
@@ -3022,7 +3163,6 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
         obj->map_and_fenceable = mappable && fenceable;
 
-        i915_gem_object_unpin_pages(obj);
         trace_i915_gem_object_bind(obj, map_and_fenceable);
         i915_gem_verify_gtt(dev);
         return 0;
@@ -3722,7 +3862,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
                           const struct drm_i915_gem_object_ops *ops)
 {
         INIT_LIST_HEAD(&obj->mm_list);
-        INIT_LIST_HEAD(&obj->gtt_list);
+        INIT_LIST_HEAD(&obj->global_list);
         INIT_LIST_HEAD(&obj->ring_list);
         INIT_LIST_HEAD(&obj->exec_list);
 
@@ -3822,7 +3962,13 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
                 dev_priv->mm.interruptible = was_interruptible;
         }
 
-        obj->pages_pin_count = 0;
+        /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
+         * before progressing. */
+        if (obj->stolen)
+                i915_gem_object_unpin_pages(obj);
+
+        if (WARN_ON(obj->pages_pin_count))
+                obj->pages_pin_count = 0;
         i915_gem_object_put_pages(obj);
         i915_gem_object_free_mmap_offset(obj);
         i915_gem_object_release_stolen(obj);
@@ -3973,12 +4119,21 @@ static int i915_gem_init_rings(struct drm_device *dev)
                         goto cleanup_bsd_ring;
         }
 
+        if (HAS_VEBOX(dev)) {
+                ret = intel_init_vebox_ring_buffer(dev);
+                if (ret)
+                        goto cleanup_blt_ring;
+        }
+
+
         ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
         if (ret)
-                goto cleanup_blt_ring;
+                goto cleanup_vebox_ring;
 
         return 0;
 
+cleanup_vebox_ring:
+        intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
 cleanup_blt_ring:
         intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
 cleanup_bsd_ring:
@@ -4453,10 +4608,10 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
         }
 
         cnt = 0;
-        list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
+        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
                 if (obj->pages_pin_count == 0)
                         cnt += obj->base.size >> PAGE_SHIFT;
-        list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
+        list_for_each_entry(obj, &dev_priv->mm.inactive_list, global_list)
                 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
                         cnt += obj->base.size >> PAGE_SHIFT;
 