Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
 drivers/gpu/drm/i915/i915_gem.c | 231 +--------------------------------------
 1 file changed, 2 insertions(+), 229 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 45b998218d0c..b5a7b00264a6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -49,9 +49,6 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
                                        unsigned alignment);
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
-static int i915_gem_evict_something(struct drm_device *dev, int min_size,
-                                    unsigned alignment);
-static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                                 struct drm_i915_gem_pwrite *args,
                                 struct drm_file *file_priv);
@@ -1885,19 +1882,6 @@ i915_gem_flush(struct drm_device *dev,
                        flush_domains);
 }
 
-static void
-i915_gem_flush_ring(struct drm_device *dev,
-               uint32_t invalidate_domains,
-               uint32_t flush_domains,
-               struct intel_ring_buffer *ring)
-{
-        if (flush_domains & I915_GEM_DOMAIN_CPU)
-                drm_agp_chipset_flush(dev);
-        ring->flush(dev, ring,
-                    invalidate_domains,
-                    flush_domains);
-}
-
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
@@ -2008,53 +1992,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
         return ret;
 }
 
-static int
-i915_gem_scan_inactive_list_and_evict(struct drm_device *dev, int min_size,
-                                      unsigned alignment, int *found)
-{
-        drm_i915_private_t *dev_priv = dev->dev_private;
-        struct drm_gem_object *obj;
-        struct drm_i915_gem_object *obj_priv;
-        struct drm_gem_object *best = NULL;
-        struct drm_gem_object *first = NULL;
-
-        /* Try to find the smallest clean object */
-        list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-                struct drm_gem_object *obj = &obj_priv->base;
-                if (obj->size >= min_size) {
-                        if ((!obj_priv->dirty ||
-                             i915_gem_object_is_purgeable(obj_priv)) &&
-                            (!best || obj->size < best->size)) {
-                                best = obj;
-                                if (best->size == min_size)
-                                        break;
-                        }
-                        if (!first)
-                                first = obj;
-                }
-        }
-
-        obj = best ? best : first;
-
-        if (!obj) {
-                *found = 0;
-                return 0;
-        }
-
-        *found = 1;
-
-#if WATCH_LRU
-        DRM_INFO("%s: evicting %p\n", __func__, obj);
-#endif
-        obj_priv = to_intel_bo(obj);
-        BUG_ON(obj_priv->pin_count != 0);
-        BUG_ON(obj_priv->active);
-
-        /* Wait on the rendering and unbind the buffer. */
-        return i915_gem_object_unbind(obj);
-}
-
-static int
+int
 i915_gpu_idle(struct drm_device *dev)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2095,147 +2033,6 @@ i915_gpu_idle(struct drm_device *dev)
         return ret;
 }
 
-static int
-i915_gem_evict_everything(struct drm_device *dev)
-{
-        drm_i915_private_t *dev_priv = dev->dev_private;
-        int ret;
-        bool lists_empty;
-
-        spin_lock(&dev_priv->mm.active_list_lock);
-        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-                       list_empty(&dev_priv->mm.flushing_list) &&
-                       list_empty(&dev_priv->render_ring.active_list) &&
-                       (!HAS_BSD(dev)
-                        || list_empty(&dev_priv->bsd_ring.active_list)));
-        spin_unlock(&dev_priv->mm.active_list_lock);
-
-        if (lists_empty)
-                return -ENOSPC;
-
-        /* Flush everything (on to the inactive lists) and evict */
-        ret = i915_gpu_idle(dev);
-        if (ret)
-                return ret;
-
-        BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-
-        ret = i915_gem_evict_from_inactive_list(dev);
-        if (ret)
-                return ret;
-
-        spin_lock(&dev_priv->mm.active_list_lock);
-        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-                       list_empty(&dev_priv->mm.flushing_list) &&
-                       list_empty(&dev_priv->render_ring.active_list) &&
-                       (!HAS_BSD(dev)
-                        || list_empty(&dev_priv->bsd_ring.active_list)));
-        spin_unlock(&dev_priv->mm.active_list_lock);
-        BUG_ON(!lists_empty);
-
-        return 0;
-}
-
-static int
-i915_gem_evict_something(struct drm_device *dev,
-                         int min_size, unsigned alignment)
-{
-        drm_i915_private_t *dev_priv = dev->dev_private;
-        int ret, found;
-
-        struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
-        struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
-        for (;;) {
-                i915_gem_retire_requests(dev);
-
-                /* If there's an inactive buffer available now, grab it
-                 * and be done.
-                 */
-                ret = i915_gem_scan_inactive_list_and_evict(dev, min_size,
-                                                            alignment,
-                                                            &found);
-                if (found)
-                        return ret;
-
-                /* If we didn't get anything, but the ring is still processing
-                 * things, wait for the next to finish and hopefully leave us
-                 * a buffer to evict.
-                 */
-                if (!list_empty(&render_ring->request_list)) {
-                        struct drm_i915_gem_request *request;
-
-                        request = list_first_entry(&render_ring->request_list,
-                                                   struct drm_i915_gem_request,
-                                                   list);
-
-                        ret = i915_wait_request(dev,
-                                        request->seqno, request->ring);
-                        if (ret)
-                                return ret;
-
-                        continue;
-                }
-
-                if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
-                        struct drm_i915_gem_request *request;
-
-                        request = list_first_entry(&bsd_ring->request_list,
-                                                   struct drm_i915_gem_request,
-                                                   list);
-
-                        ret = i915_wait_request(dev,
-                                        request->seqno, request->ring);
-                        if (ret)
-                                return ret;
-
-                        continue;
-                }
-
-                /* If we didn't have anything on the request list but there
-                 * are buffers awaiting a flush, emit one and try again.
-                 * When we wait on it, those buffers waiting for that flush
-                 * will get moved to inactive.
-                 */
-                if (!list_empty(&dev_priv->mm.flushing_list)) {
-                        struct drm_gem_object *obj = NULL;
-                        struct drm_i915_gem_object *obj_priv;
-
-                        /* Find an object that we can immediately reuse */
-                        list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
-                                obj = &obj_priv->base;
-                                if (obj->size >= min_size)
-                                        break;
-
-                                obj = NULL;
-                        }
-
-                        if (obj != NULL) {
-                                uint32_t seqno;
-
-                                i915_gem_flush_ring(dev,
-                                                    obj->write_domain,
-                                                    obj->write_domain,
-                                                    obj_priv->ring);
-                                seqno = i915_add_request(dev, NULL,
-                                                obj->write_domain,
-                                                obj_priv->ring);
-                                if (seqno == 0)
-                                        return -ENOMEM;
-                                continue;
-                        }
-                }
-
-                /* If we didn't do any of the above, there's no single buffer
-                 * large enough to swap out for the new one, so just evict
-                 * everything and start again. (This should be rare.)
-                 */
-                if (!list_empty (&dev_priv->mm.inactive_list))
-                        return i915_gem_evict_from_inactive_list(dev);
-                else
-                        return i915_gem_evict_everything(dev);
-        }
-}
-
 int
 i915_gem_object_get_pages(struct drm_gem_object *obj,
                           gfp_t gfpmask)
@@ -4548,30 +4345,6 @@ void i915_gem_free_object(struct drm_gem_object *obj)
         i915_gem_free_object_tail(obj);
 }
 
-/** Unbinds all inactive objects. */
-static int
-i915_gem_evict_from_inactive_list(struct drm_device *dev)
-{
-        drm_i915_private_t *dev_priv = dev->dev_private;
-
-        while (!list_empty(&dev_priv->mm.inactive_list)) {
-                struct drm_gem_object *obj;
-                int ret;
-
-                obj = &list_first_entry(&dev_priv->mm.inactive_list,
-                                        struct drm_i915_gem_object,
-                                        list)->base;
-
-                ret = i915_gem_object_unbind(obj);
-                if (ret != 0) {
-                        DRM_ERROR("Error unbinding object: %d\n", ret);
-                        return ret;
-                }
-        }
-
-        return 0;
-}
-
 int
 i915_gem_idle(struct drm_device *dev)
 {
@@ -4596,7 +4369,7 @@ i915_gem_idle(struct drm_device *dev)
 
         /* Under UMS, be paranoid and evict. */
         if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
-                ret = i915_gem_evict_from_inactive_list(dev);
+                ret = i915_gem_evict_inactive(dev);
                 if (ret) {
                         mutex_unlock(&dev->struct_mutex);
                         return ret;