author     Chris Wilson <chris@chris-wilson.co.uk>    2009-09-14 11:50:30 -0400
committer  Jesse Barnes <jbarnes@virtuousgeek.org>    2009-09-17 17:43:32 -0400
commit     07f73f6912667621276b002e33844ef283d98203
tree       4da6860e7d4a165ef1fe233becc74005f9964ca2
parent     3ef94daae7530b4ebcd2e5f48f1028cd2d2470ba
drm/i915: Improve behaviour under memory pressure
Because it must take the struct_mutex, the i915
shrinker cannot free the inactive lists if we fail to allocate memory
whilst processing a batch buffer, triggering an OOM and an ENOMEM that
is reported back to userspace. In order to fare better under such
circumstances we need to manually retry a failed allocation after
evicting inactive buffers.
Doing so involves three steps (the core retry pattern is sketched below):
1. Marking the backing shm pages as NORETRY.
2. Updating the get_pages() callers to evict something on failure and then
retry.
3. Revamping the evict-something logic to be smarter about the required
buffer size, and to prefer volatile or clean inactive pages.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
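
The heart of steps 1 and 2 is a single pattern: the shmem mapping is created with `__GFP_NORETRY` set, so an allocation under memory pressure fails fast instead of invoking the OOM killer, and the recoverable callers then evict GTT buffers and retry once with the flag cleared. Here is a minimal sketch of that pattern, condensed from the `i915_gem_object_get_pages_or_evict()` hunk in this patch; the wrapper name is mine, the helpers it calls are the driver's own, and locking and the driver context are elided, so this is illustrative rather than a drop-in function:

```c
/* Sketch: evict-and-retry on ENOMEM, condensed from this patch.
 * The mapping was created with __GFP_NORETRY (see the drm_gem.c hunk
 * below), so get_pages() fails fast rather than triggering the OOM
 * killer.
 */
static int get_pages_or_evict(struct drm_gem_object *obj)
{
	struct address_space *mapping =
		obj->filp->f_path.dentry->d_inode->i_mapping;
	int ret = i915_gem_object_get_pages(obj);

	if (ret == -ENOMEM) {
		gfp_t gfp = mapping_gfp_mask(mapping);

		/* Make room by unbinding an inactive buffer of at least
		 * the required size (step 3 makes this selection smart).
		 */
		ret = i915_gem_evict_something(obj->dev, obj->size);
		if (ret)
			return ret;

		/* Retry once with reclaim allowed, then restore the
		 * fail-fast mask for future allocations.
		 */
		mapping_set_gfp_mask(mapping, gfp & ~__GFP_NORETRY);
		ret = i915_gem_object_get_pages(obj);
		mapping_set_gfp_mask(mapping, gfp);
	}

	return ret;
}
```

The same clear-and-restore dance appears a second time in `i915_gem_object_bind_to_gtt()`, guarded by `retry_alloc`, as a last resort once eviction alone has failed.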
-rw-r--r--  drivers/gpu/drm/drm_gem.c        |  13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  | 313
2 files changed, 245 insertions(+), 81 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 230c9ffdd5e9..80391995bdec 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -142,6 +142,19 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
 	if (IS_ERR(obj->filp))
 		goto free;
 
+	/* Basically we want to disable the OOM killer and handle ENOMEM
+	 * ourselves by sacrificing pages from cached buffers.
+	 * XXX shmem_file_[gs]et_gfp_mask()
+	 */
+	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
+			     GFP_HIGHUSER |
+			     __GFP_COLD |
+			     __GFP_FS |
+			     __GFP_RECLAIMABLE |
+			     __GFP_NORETRY |
+			     __GFP_NOWARN |
+			     __GFP_NOMEMALLOC);
+
 	kref_init(&obj->refcount);
 	kref_init(&obj->handlecount);
 	obj->size = size;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2ab30f251943..725b4484a092 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -48,7 +48,9 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
 					   unsigned alignment);
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
-static int i915_gem_evict_something(struct drm_device *dev);
+static int i915_gem_evict_something(struct drm_device *dev, int min_size);
+static int i915_gem_evict_from_list(struct drm_device *dev,
+				    struct list_head *head);
 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 				struct drm_i915_gem_pwrite *args,
 				struct drm_file *file_priv);
@@ -319,6 +321,45 @@ fail_unlock:
 	return ret;
 }
 
+static inline gfp_t
+i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
+{
+	return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
+}
+
+static inline void
+i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
+{
+	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
+}
+
+static int
+i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
+{
+	int ret;
+
+	ret = i915_gem_object_get_pages(obj);
+
+	/* If we've insufficient memory to map in the pages, attempt
+	 * to make some space by throwing out some old buffers.
+	 */
+	if (ret == -ENOMEM) {
+		struct drm_device *dev = obj->dev;
+		gfp_t gfp;
+
+		ret = i915_gem_evict_something(dev, obj->size);
+		if (ret)
+			return ret;
+
+		gfp = i915_gem_object_get_page_gfp_mask(obj);
+		i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
+		ret = i915_gem_object_get_pages(obj);
+		i915_gem_object_set_page_gfp_mask (obj, gfp);
+	}
+
+	return ret;
+}
+
 /**
  * This is the fallback shmem pread path, which allocates temporary storage
  * in kernel space to copy_to_user into outside of the struct_mutex, so we
@@ -370,8 +411,8 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_get_pages(obj);
-	if (ret != 0)
+	ret = i915_gem_object_get_pages_or_evict(obj);
+	if (ret)
 		goto fail_unlock;
 
 	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
@@ -845,8 +886,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_get_pages(obj);
-	if (ret != 0)
+	ret = i915_gem_object_get_pages_or_evict(obj);
+	if (ret)
 		goto fail_unlock;
 
 	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
@@ -1965,37 +2006,127 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 	return 0;
 }
 
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
+{
+	return !obj_priv->dirty || obj_priv->madv == I915_MADV_DONTNEED;
+}
+
+static struct drm_gem_object *
+i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
+	struct drm_gem_object *best = NULL;
+	struct drm_gem_object *first = NULL;
+
+	/* Try to find the smallest clean object */
+	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+		struct drm_gem_object *obj = obj_priv->obj;
+		if (obj->size >= min_size) {
+			if (i915_gem_object_is_purgeable(obj_priv) &&
+			    (!best || obj->size < best->size)) {
+				best = obj;
+				if (best->size == min_size)
+					return best;
+			}
+			if (!first)
+				first = obj;
+		}
+	}
+
+	return best ? best : first;
+}
+
+static int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t seqno;
+	int ret;
+	bool lists_empty;
+
+	DRM_INFO("GTT full, evicting everything: "
+		 "%d objects [%d pinned], "
+		 "%d object bytes [%d pinned], "
+		 "%d/%d gtt bytes\n",
+		 atomic_read(&dev->object_count),
+		 atomic_read(&dev->pin_count),
+		 atomic_read(&dev->object_memory),
+		 atomic_read(&dev->pin_memory),
+		 atomic_read(&dev->gtt_memory),
+		 dev->gtt_total);
+
+	spin_lock(&dev_priv->mm.active_list_lock);
+	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+		       list_empty(&dev_priv->mm.flushing_list) &&
+		       list_empty(&dev_priv->mm.active_list));
+	spin_unlock(&dev_priv->mm.active_list_lock);
+
+	if (lists_empty) {
+		DRM_ERROR("GTT full, but lists empty!\n");
+		return -ENOSPC;
+	}
+
+	/* Flush everything (on to the inactive lists) and evict */
+	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
+	if (seqno == 0)
+		return -ENOMEM;
+
+	ret = i915_wait_request(dev, seqno);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
+	if (ret)
+		return ret;
+
+	spin_lock(&dev_priv->mm.active_list_lock);
+	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+		       list_empty(&dev_priv->mm.flushing_list) &&
+		       list_empty(&dev_priv->mm.active_list));
+	spin_unlock(&dev_priv->mm.active_list_lock);
+	BUG_ON(!lists_empty);
+
+	return 0;
+}
+
 static int
-i915_gem_evict_something(struct drm_device *dev)
+i915_gem_evict_something(struct drm_device *dev, int min_size)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
-	int ret = 0;
+	int have_waited = 0;
+	int ret;
 
 	for (;;) {
+		i915_gem_retire_requests(dev);
+
 		/* If there's an inactive buffer available now, grab it
 		 * and be done.
 		 */
-		if (!list_empty(&dev_priv->mm.inactive_list)) {
-			obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
-						    struct drm_i915_gem_object,
-						    list);
-			obj = obj_priv->obj;
-			BUG_ON(obj_priv->pin_count != 0);
+		obj = i915_gem_find_inactive_object(dev, min_size);
+		if (obj) {
+			struct drm_i915_gem_object *obj_priv;
+
 #if WATCH_LRU
 			DRM_INFO("%s: evicting %p\n", __func__, obj);
 #endif
+			obj_priv = obj->driver_private;
+			BUG_ON(obj_priv->pin_count != 0);
 			BUG_ON(obj_priv->active);
 
 			/* Wait on the rendering and unbind the buffer. */
-			ret = i915_gem_object_unbind(obj);
-			break;
+			return i915_gem_object_unbind(obj);
 		}
 
+		if (have_waited)
+			return 0;
+
 		/* If we didn't get anything, but the ring is still processing
-		 * things, wait for one of those things to finish and hopefully
-		 * leave us a buffer to evict.
+		 * things, wait for the next to finish and hopefully leave us
+		 * a buffer to evict.
 		 */
 		if (!list_empty(&dev_priv->mm.request_list)) {
 			struct drm_i915_gem_request *request;
@@ -2006,16 +2137,10 @@ i915_gem_evict_something(struct drm_device *dev)
 
 			ret = i915_wait_request(dev, request->seqno);
 			if (ret)
-				break;
+				return ret;
 
-			/* if waiting caused an object to become inactive,
-			 * then loop around and wait for it. Otherwise, we
-			 * assume that waiting freed and unbound something,
-			 * so there should now be some space in the GTT
-			 */
-			if (!list_empty(&dev_priv->mm.inactive_list))
-				continue;
-			break;
+			have_waited = 1;
+			continue;
 		}
 
 		/* If we didn't have anything on the request list but there
@@ -2024,6 +2149,9 @@ i915_gem_evict_something(struct drm_device *dev)
 		 * will get moved to inactive.
 		 */
 		if (!list_empty(&dev_priv->mm.flushing_list)) {
+			struct drm_i915_gem_object *obj_priv;
+			uint32_t seqno;
+
 			obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
 						    struct drm_i915_gem_object,
 						    list);
@@ -2032,38 +2160,29 @@ i915_gem_evict_something(struct drm_device *dev)
 			i915_gem_flush(dev,
 				       obj->write_domain,
 				       obj->write_domain);
-			i915_add_request(dev, NULL, obj->write_domain);
+			seqno = i915_add_request(dev, NULL, obj->write_domain);
+			if (seqno == 0)
+				return -ENOMEM;
+
+			ret = i915_wait_request(dev, seqno);
+			if (ret)
+				return ret;
 
-			obj = NULL;
+			have_waited = 1;
 			continue;
 		}
 
-		DRM_ERROR("inactive empty %d request empty %d "
-			  "flushing empty %d\n",
-			  list_empty(&dev_priv->mm.inactive_list),
-			  list_empty(&dev_priv->mm.request_list),
-			  list_empty(&dev_priv->mm.flushing_list));
-		/* If we didn't do any of the above, there's nothing to be done
-		 * and we just can't fit it in.
+		/* If we didn't do any of the above, there's no single buffer
+		 * large enough to swap out for the new one, so just evict
+		 * everything and start again. (This should be rare.)
 		 */
-		return -ENOSPC;
-	}
-	return ret;
-}
-
-static int
-i915_gem_evict_everything(struct drm_device *dev)
-{
-	int ret;
-
-	for (;;) {
-		ret = i915_gem_evict_something(dev);
-		if (ret != 0)
-			break;
+		if (!list_empty (&dev_priv->mm.inactive_list)) {
+			DRM_INFO("GTT full, evicting inactive buffers\n");
+			return i915_gem_evict_from_list(dev,
+							&dev_priv->mm.inactive_list);
+		} else
+			return i915_gem_evict_everything(dev);
 	}
-	if (ret == -ENOSPC)
-		return 0;
-	return ret;
 }
 
 int
@@ -2086,7 +2205,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
 	BUG_ON(obj_priv->pages != NULL);
 	obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
 	if (obj_priv->pages == NULL) {
-		DRM_ERROR("Faled to allocate page list\n");
+		DRM_ERROR("Failed to allocate page list\n");
 		obj_priv->pages_refcount--;
 		return -ENOMEM;
 	}
@@ -2097,7 +2216,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
 		page = read_mapping_page(mapping, i, NULL);
 		if (IS_ERR(page)) {
 			ret = PTR_ERR(page);
-			DRM_ERROR("read_mapping_page failed: %d\n", ret);
 			i915_gem_object_put_pages(obj);
 			return ret;
 		}
@@ -2416,7 +2534,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	struct drm_mm_node *free_space;
-	int page_count, ret;
+	bool retry_alloc = false;
+	int ret;
 
 	if (dev_priv->mm.suspended)
 		return -EBUSY;
@@ -2445,25 +2564,13 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 		}
 	}
 	if (obj_priv->gtt_space == NULL) {
-		bool lists_empty;
-
 		/* If the gtt is empty and we're still having trouble
 		 * fitting our object in, we're out of memory.
 		 */
 #if WATCH_LRU
 		DRM_INFO("%s: GTT full, evicting something\n", __func__);
 #endif
-		spin_lock(&dev_priv->mm.active_list_lock);
-		lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-			       list_empty(&dev_priv->mm.flushing_list) &&
-			       list_empty(&dev_priv->mm.active_list));
-		spin_unlock(&dev_priv->mm.active_list_lock);
-		if (lists_empty) {
-			DRM_ERROR("GTT full, but LRU list empty\n");
-			return -ENOSPC;
-		}
-
-		ret = i915_gem_evict_something(dev);
+		ret = i915_gem_evict_something(dev, obj->size);
 		if (ret != 0) {
 			if (ret != -ERESTARTSYS)
 				DRM_ERROR("Failed to evict a buffer %d\n", ret);
@@ -2476,27 +2583,62 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	DRM_INFO("Binding object of size %zd at 0x%08x\n",
 		 obj->size, obj_priv->gtt_offset);
 #endif
+	if (retry_alloc) {
+		i915_gem_object_set_page_gfp_mask (obj,
+						   i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
+	}
 	ret = i915_gem_object_get_pages(obj);
+	if (retry_alloc) {
+		i915_gem_object_set_page_gfp_mask (obj,
+						   i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
+	}
 	if (ret) {
 		drm_mm_put_block(obj_priv->gtt_space);
 		obj_priv->gtt_space = NULL;
+
+		if (ret == -ENOMEM) {
+			/* first try to clear up some space from the GTT */
+			ret = i915_gem_evict_something(dev, obj->size);
+			if (ret) {
+				if (ret != -ERESTARTSYS)
+					DRM_ERROR("Failed to allocate space for backing pages %d\n", ret);
+
+				/* now try to shrink everyone else */
+				if (! retry_alloc) {
+					retry_alloc = true;
+					goto search_free;
+				}
+
+				return ret;
+			}
+
+			goto search_free;
+		}
+
 		return ret;
 	}
 
-	page_count = obj->size / PAGE_SIZE;
 	/* Create an AGP memory structure pointing at our pages, and bind it
 	 * into the GTT.
 	 */
 	obj_priv->agp_mem = drm_agp_bind_pages(dev,
 					       obj_priv->pages,
-					       page_count,
+					       obj->size >> PAGE_SHIFT,
 					       obj_priv->gtt_offset,
 					       obj_priv->agp_type);
 	if (obj_priv->agp_mem == NULL) {
 		i915_gem_object_put_pages(obj);
 		drm_mm_put_block(obj_priv->gtt_space);
 		obj_priv->gtt_space = NULL;
-		return -ENOMEM;
+
+		ret = i915_gem_evict_something(dev, obj->size);
+		if (ret) {
+			if (ret != -ERESTARTSYS)
+				DRM_ERROR("Failed to allocate space to bind AGP: %d\n", ret);
+			return ret;
+		}
+
+		goto search_free;
 	}
 	atomic_inc(&dev->gtt_count);
 	atomic_add(obj->size, &dev->gtt_memory);
@@ -3423,8 +3565,23 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
 			/* error other than GTT full, or we've already tried again */
 			if (ret != -ENOSPC || pin_tries >= 1) {
-				if (ret != -ERESTARTSYS)
-					DRM_ERROR("Failed to pin buffers %d\n", ret);
+				if (ret != -ERESTARTSYS) {
+					unsigned long long total_size = 0;
+					for (i = 0; i < args->buffer_count; i++)
+						total_size += object_list[i]->size;
+					DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
+						  pinned+1, args->buffer_count,
+						  total_size, ret);
+					DRM_ERROR("%d objects [%d pinned], "
+						  "%d object bytes [%d pinned], "
+						  "%d/%d gtt bytes\n",
+						  atomic_read(&dev->object_count),
+						  atomic_read(&dev->pin_count),
+						  atomic_read(&dev->object_memory),
+						  atomic_read(&dev->pin_memory),
+						  atomic_read(&dev->gtt_memory),
+						  dev->gtt_total);
+				}
 				goto err;
 			}
 
@@ -3435,7 +3592,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
 		/* evict everyone we can from the aperture */
 		ret = i915_gem_evict_everything(dev);
-		if (ret)
+		if (ret && ret != -ENOSPC)
 			goto err;
 	}
 
@@ -4568,12 +4725,6 @@ i915_gem_object_truncate(struct drm_gem_object *obj)
 	mutex_unlock(&inode->i_mutex);
 }
 
-static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
-{
-	return !obj_priv->dirty || obj_priv->madv == I915_MADV_DONTNEED;
-}
-
 static int
 i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 {