Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	| 154
1 file changed, 134 insertions, 20 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e5b6a92e7102..a6178baccb56 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -176,7 +176,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 
 	pinned = 0;
 	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
 		if (obj->pin_count)
 			pinned += obj->gtt_space->size;
 	mutex_unlock(&dev->struct_mutex);
@@ -956,7 +956,7 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
 
 	ret = 0;
 	if (seqno == ring->outstanding_lazy_request)
-		ret = i915_add_request(ring, NULL, NULL);
+		ret = i915_add_request(ring, NULL);
 
 	return ret;
 }
@@ -1676,7 +1676,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early. */
-	list_del(&obj->gtt_list);
+	list_del(&obj->global_list);
 
 	ops->put_pages(obj);
 	obj->pages = NULL;
@@ -1696,7 +1696,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
 
 	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.unbound_list,
-				 gtt_list) {
+				 global_list) {
 		if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
		    i915_gem_object_put_pages(obj) == 0) {
 			count += obj->base.size >> PAGE_SHIFT;
@@ -1733,7 +1733,8 @@ i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 
 	i915_gem_evict_everything(dev_priv->dev);
 
-	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
+	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
+				 global_list)
 		i915_gem_object_put_pages(obj);
 }
 
@@ -1858,7 +1859,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 	if (ret)
 		return ret;
 
-	list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 	return 0;
 }
 
@@ -1996,17 +1997,18 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 	return 0;
 }
 
-int
-i915_add_request(struct intel_ring_buffer *ring,
-		 struct drm_file *file,
-		 u32 *out_seqno)
+int __i915_add_request(struct intel_ring_buffer *ring,
+		       struct drm_file *file,
+		       struct drm_i915_gem_object *obj,
+		       u32 *out_seqno)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	struct drm_i915_gem_request *request;
-	u32 request_ring_position;
+	u32 request_ring_position, request_start;
 	int was_empty;
 	int ret;
 
+	request_start = intel_ring_get_tail(ring);
 	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
@@ -2038,8 +2040,17 @@ i915_add_request(struct intel_ring_buffer *ring,
 
 	request->seqno = intel_ring_get_seqno(ring);
 	request->ring = ring;
+	request->head = request_start;
 	request->tail = request_ring_position;
 	request->ctx = ring->last_context;
+	request->batch_obj = obj;
+
+	/* Whilst this request exists, batch_obj will be on the
+	 * active_list, and so will hold the active reference. Only when this
+	 * request is retired will the batch_obj be moved onto the
+	 * inactive_list and lose its active reference. Hence we do not need
+	 * to explicitly hold another reference here.
+	 */
 
 	if (request->ctx)
 		i915_gem_context_reference(request->ctx);
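Note on the two-argument i915_add_request() callers seen in the earlier hunks (and in the retire-work hunk further down): with this change the entry point that records the batch object is __i915_add_request(), and i915_add_request() is presumably kept as a thin convenience wrapper in i915_drv.h, which is outside this file's diff. A sketch of what such a wrapper would look like, assuming that header change:

	/* Hypothetical wrapper (assumed i915_drv.h change, not shown here):
	 * callers that have no batch object - cache flushes, the retire work
	 * handler, the OLR check - keep the short two-argument form and pass
	 * a NULL batch_obj through to __i915_add_request(). */
	#define i915_add_request(ring, seqno) \
		__i915_add_request(ring, NULL, NULL, seqno)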
@@ -2096,6 +2107,94 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
 	spin_unlock(&file_priv->mm.lock);
 }
 
+static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
+{
+	if (acthd >= obj->gtt_offset &&
+	    acthd < obj->gtt_offset + obj->base.size)
+		return true;
+
+	return false;
+}
+
+static bool i915_head_inside_request(const u32 acthd_unmasked,
+				     const u32 request_start,
+				     const u32 request_end)
+{
+	const u32 acthd = acthd_unmasked & HEAD_ADDR;
+
+	if (request_start < request_end) {
+		if (acthd >= request_start && acthd < request_end)
+			return true;
+	} else if (request_start > request_end) {
+		if (acthd >= request_start || acthd < request_end)
+			return true;
+	}
+
+	return false;
+}
+
+static bool i915_request_guilty(struct drm_i915_gem_request *request,
+				const u32 acthd, bool *inside)
+{
+	/* There is a possibility that the unmasked head address, while
+	 * pointing inside the ring, matches the batch_obj address range.
+	 * However, this is extremely unlikely.
+	 */
+
+	if (request->batch_obj) {
+		if (i915_head_inside_object(acthd, request->batch_obj)) {
+			*inside = true;
+			return true;
+		}
+	}
+
+	if (i915_head_inside_request(acthd, request->head, request->tail)) {
+		*inside = false;
+		return true;
+	}
+
+	return false;
+}
+
+static void i915_set_reset_status(struct intel_ring_buffer *ring,
+				  struct drm_i915_gem_request *request,
+				  u32 acthd)
+{
+	struct i915_ctx_hang_stats *hs = NULL;
+	bool inside, guilty;
+
+	/* Innocent until proven guilty */
+	guilty = false;
+
+	if (ring->hangcheck.action != wait &&
+	    i915_request_guilty(request, acthd, &inside)) {
+		DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n",
+			  ring->name,
+			  inside ? "inside" : "flushing",
+			  request->batch_obj ?
+			  request->batch_obj->gtt_offset : 0,
+			  request->ctx ? request->ctx->id : 0,
+			  acthd);
+
+		guilty = true;
+	}
+
+	/* If contexts are disabled or this is the default context, use
+	 * file_priv->hang_stats
+	 */
+	if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
+		hs = &request->ctx->hang_stats;
+	else if (request->file_priv)
+		hs = &request->file_priv->hang_stats;
+
+	if (hs) {
+		if (guilty)
+			hs->batch_active++;
+		else
+			hs->batch_pending++;
+	}
+}
+
 static void i915_gem_free_request(struct drm_i915_gem_request *request)
 {
 	list_del(&request->list);
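i915_head_inside_request() has to tolerate the ring buffer wrapping: a request's recorded head can sit near the end of the ring while its tail has already wrapped to a lower offset, so request_start > request_end is a legitimate case rather than an error. A minimal, self-contained sketch of the same wrap-aware comparison (standalone C with made-up offsets, not driver code; the HEAD_ADDR masking is omitted):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Wrap-aware "is head inside [start, end)" test, mirroring the
	 * comparison done by i915_head_inside_request(). */
	static bool head_inside(uint32_t head, uint32_t start, uint32_t end)
	{
		if (start < end)
			return head >= start && head < end;
		if (start > end)	/* the request wrapped around the ring */
			return head >= start || head < end;
		return false;		/* start == end: empty request */
	}

	int main(void)
	{
		/* Hypothetical request spanning 0xf00..0x100 across the wrap. */
		printf("%d %d %d\n",
		       head_inside(0xf80, 0xf00, 0x100),	/* 1: before the wrap */
		       head_inside(0x080, 0xf00, 0x100),	/* 1: after the wrap  */
		       head_inside(0x800, 0xf00, 0x100));	/* 0: outside         */
		return 0;
	}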
@@ -2110,6 +2209,12 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
				       struct intel_ring_buffer *ring)
 {
+	u32 completed_seqno;
+	u32 acthd;
+
+	acthd = intel_ring_get_active_head(ring);
+	completed_seqno = ring->get_seqno(ring, false);
+
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 
@@ -2117,6 +2222,9 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
					   struct drm_i915_gem_request,
					   list);
 
+		if (request->seqno > completed_seqno)
+			i915_set_reset_status(ring, request, acthd);
+
 		i915_gem_free_request(request);
 	}
 
@@ -2276,7 +2384,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	idle = true;
 	for_each_ring(ring, dev_priv, i) {
 		if (ring->gpu_caches_dirty)
-			i915_add_request(ring, NULL, NULL);
+			i915_add_request(ring, NULL);
 
 		idle &= list_empty(&ring->request_list);
 	}
@@ -2508,9 +2616,10 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 		obj->has_aliasing_ppgtt_mapping = 0;
 	}
 	i915_gem_gtt_finish_object(obj);
+	i915_gem_object_unpin_pages(obj);
 
 	list_del(&obj->mm_list);
-	list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+	list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 	/* Avoid an unnecessary call to unbind on rebind. */
 	obj->map_and_fenceable = true;
 
@@ -2918,7 +3027,7 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
 	struct drm_i915_gem_object *obj;
 	int err = 0;
 
-	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+	list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
 		if (obj->gtt_space == NULL) {
 			printk(KERN_ERR "object found on GTT list with no space reserved\n");
 			err++;
@@ -3042,7 +3151,7 @@ search_free:
 		return ret;
 	}
 
-	list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
+	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
 	obj->gtt_space = node;
@@ -3057,7 +3166,6 @@ search_free:
 
 	obj->map_and_fenceable = mappable && fenceable;
 
-	i915_gem_object_unpin_pages(obj);
 	trace_i915_gem_object_bind(obj, map_and_fenceable);
 	i915_gem_verify_gtt(dev);
 	return 0;
@@ -3757,7 +3865,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
 {
 	INIT_LIST_HEAD(&obj->mm_list);
-	INIT_LIST_HEAD(&obj->gtt_list);
+	INIT_LIST_HEAD(&obj->global_list);
 	INIT_LIST_HEAD(&obj->ring_list);
 	INIT_LIST_HEAD(&obj->exec_list);
 
@@ -3857,7 +3965,13 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 		dev_priv->mm.interruptible = was_interruptible;
 	}
 
-	obj->pages_pin_count = 0;
+	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
+	 * before progressing. */
+	if (obj->stolen)
+		i915_gem_object_unpin_pages(obj);
+
+	if (WARN_ON(obj->pages_pin_count))
+		obj->pages_pin_count = 0;
 	i915_gem_object_put_pages(obj);
 	i915_gem_object_free_mmap_offset(obj);
 	i915_gem_object_release_stolen(obj);
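The page-pinning hunks fit together: the bind path no longer drops its page pin (removed in the search_free hunk above), the unbind path now drops it instead, stolen objects get their creation-time pin released explicitly, and anything still pinned when the object is freed trips a WARN_ON rather than being silently zeroed. A toy model of that discipline, runnable on its own (names are illustrative, not the driver's):

	#include <assert.h>
	#include <stdio.h>

	/* Toy object: the bind path takes a page pin that only the unbind
	 * path releases, so the count must already be zero by free time. */
	struct toy_obj { int pages_pin_count; };

	static void toy_bind(struct toy_obj *o)   { o->pages_pin_count++; }
	static void toy_unbind(struct toy_obj *o) { o->pages_pin_count--; }

	int main(void)
	{
		struct toy_obj obj = { 0 };

		toy_bind(&obj);		/* pages stay pinned while bound */
		toy_unbind(&obj);	/* pin dropped only at unbind    */

		/* Free path: warn about a leaked pin instead of hiding it. */
		if (obj.pages_pin_count)
			fprintf(stderr, "leaked page pin\n");
		assert(obj.pages_pin_count == 0);
		return 0;
	}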
@@ -4498,10 +4612,10 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 	}
 
 	cnt = 0;
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
 		if (obj->pages_pin_count == 0)
 			cnt += obj->base.size >> PAGE_SHIFT;
-	list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
+	list_for_each_entry(obj, &dev_priv->mm.inactive_list, global_list)
 		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
 			cnt += obj->base.size >> PAGE_SHIFT;
 