Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	107
1 file changed, 68 insertions(+), 39 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2748609f05b3..dda787aafcc6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -277,7 +277,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret != 0)
 		goto fail_unlock;
 
@@ -321,40 +321,24 @@ fail_unlock:
 	return ret;
 }
 
-static inline gfp_t
-i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
-{
-	return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
-}
-
-static inline void
-i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
-{
-	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
-}
-
 static int
 i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
 {
 	int ret;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
 
 	/* If we've insufficient memory to map in the pages, attempt
 	 * to make some space by throwing out some old buffers.
 	 */
 	if (ret == -ENOMEM) {
 		struct drm_device *dev = obj->dev;
-		gfp_t gfp;
 
 		ret = i915_gem_evict_something(dev, obj->size);
 		if (ret)
 			return ret;
 
-		gfp = i915_gem_object_get_page_gfp_mask(obj);
-		i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
-		ret = i915_gem_object_get_pages(obj);
-		i915_gem_object_set_page_gfp_mask (obj, gfp);
+		ret = i915_gem_object_get_pages(obj, 0);
 	}
 
 	return ret;
@@ -790,7 +774,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret != 0)
 		goto fail_unlock;
 
@@ -2230,7 +2214,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 }
 
 int
-i915_gem_object_get_pages(struct drm_gem_object *obj)
+i915_gem_object_get_pages(struct drm_gem_object *obj,
+			  gfp_t gfpmask)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int page_count, i;
@@ -2256,7 +2241,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
 	inode = obj->filp->f_path.dentry->d_inode;
 	mapping = inode->i_mapping;
 	for (i = 0; i < page_count; i++) {
-		page = read_mapping_page(mapping, i, NULL);
+		page = read_cache_page_gfp(mapping, i,
+					   mapping_gfp_mask (mapping) |
+					   __GFP_COLD |
+					   gfpmask);
 		if (IS_ERR(page)) {
 			ret = PTR_ERR(page);
 			i915_gem_object_put_pages(obj);
@@ -2579,7 +2567,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	struct drm_mm_node *free_space;
-	bool retry_alloc = false;
+	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
 	int ret;
 
 	if (obj_priv->madv != I915_MADV_WILLNEED) {
@@ -2623,15 +2611,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	DRM_INFO("Binding object of size %zd at 0x%08x\n",
 		 obj->size, obj_priv->gtt_offset);
 #endif
-	if (retry_alloc) {
-		i915_gem_object_set_page_gfp_mask (obj,
-						   i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
-	}
-	ret = i915_gem_object_get_pages(obj);
-	if (retry_alloc) {
-		i915_gem_object_set_page_gfp_mask (obj,
-						   i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
-	}
+	ret = i915_gem_object_get_pages(obj, gfpmask);
 	if (ret) {
 		drm_mm_put_block(obj_priv->gtt_space);
 		obj_priv->gtt_space = NULL;
@@ -2641,9 +2621,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	ret = i915_gem_evict_something(dev, obj->size);
 	if (ret) {
 		/* now try to shrink everyone else */
-		if (! retry_alloc) {
-			retry_alloc = true;
+		if (gfpmask) {
+			gfpmask = 0;
 			goto search_free;
 		}
 
 		return ret;
@@ -2837,6 +2817,57 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 	return 0;
 }
 
+/*
+ * Prepare buffer for display plane. Use uninterruptible for possible flush
+ * wait, as in modesetting process we're not supposed to be interrupted.
+ */
+int
+i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	uint32_t old_write_domain, old_read_domains;
+	int ret;
+
+	/* Not valid to be called on unbound objects. */
+	if (obj_priv->gtt_space == NULL)
+		return -EINVAL;
+
+	i915_gem_object_flush_gpu_write_domain(obj);
+
+	/* Wait on any GPU rendering and flushing to occur. */
+	if (obj_priv->active) {
+#if WATCH_BUF
+		DRM_INFO("%s: object %p wait for seqno %08x\n",
+			 __func__, obj, obj_priv->last_rendering_seqno);
+#endif
+		ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
+		if (ret != 0)
+			return ret;
+	}
+
+	old_write_domain = obj->write_domain;
+	old_read_domains = obj->read_domains;
+
+	obj->read_domains &= I915_GEM_DOMAIN_GTT;
+
+	i915_gem_object_flush_cpu_write_domain(obj);
+
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
+	 */
+	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+	obj->read_domains |= I915_GEM_DOMAIN_GTT;
+	obj->write_domain = I915_GEM_DOMAIN_GTT;
+	obj_priv->dirty = 1;
+
+	trace_i915_gem_object_change_domain(obj,
+					    old_read_domains,
+					    old_write_domain);
+
+	return 0;
+}
+
 /**
  * Moves a single object to the CPU read, and possibly write domain.
  *
@@ -4000,8 +4031,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 				  "back to user (%d)\n",
 				  args->buffer_count, ret);
 		}
-	} else {
-		DRM_ERROR("i915_gem_do_execbuffer returns %d\n", ret);
 	}
 
 	drm_free_large(exec_list);
@@ -4897,7 +4926,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 	if (!obj_priv->phys_obj)
 		return;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret)
 		goto out;
 
@@ -4955,7 +4984,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
 	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
 	obj_priv->phys_obj->cur_obj = obj;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret) {
 		DRM_ERROR("failed to get page list\n");
 		goto out;
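
The allocation pattern this patch adopts appears twice, in i915_gem_object_get_pages_or_evict() and in i915_gem_object_bind_to_gtt(): make a first attempt that is allowed to fail fast (__GFP_NORETRY | __GFP_NOWARN), evict old buffers if that returns -ENOMEM, then retry with an unrestricted mask (gfpmask = 0). Below is a minimal user-space sketch of the same control flow; get_pages(), evict_something() and the ALLOC_* flags are hypothetical stand-ins for i915_gem_object_get_pages(), i915_gem_evict_something() and the __GFP_* bits, not real kernel APIs.

#include <errno.h>
#include <stddef.h>

#define ALLOC_NORETRY (1u << 0)	/* stand-in for __GFP_NORETRY */
#define ALLOC_NOWARN  (1u << 1)	/* stand-in for __GFP_NOWARN */

/* Hypothetical allocator: with ALLOC_NORETRY set it returns -ENOMEM
 * under memory pressure instead of entering heavyweight reclaim. */
int get_pages(size_t size, unsigned int flags, void **out);

/* Hypothetical reclaim helper: throws out old buffers to make space. */
int evict_something(size_t size);

int get_pages_or_evict(size_t size, void **out)
{
	/* First pass: cheap, silent attempt; failure here is expected. */
	int ret = get_pages(size, ALLOC_NORETRY | ALLOC_NOWARN, out);

	if (ret == -ENOMEM) {
		/* Make room, then retry with no restrictions (the
		 * equivalent of passing gfpmask == 0 in the patch). */
		ret = evict_something(size);
		if (ret)
			return ret;
		ret = get_pages(size, 0, out);
	}
	return ret;
}

The point of the restricted first pass is that failing fast gives the driver a chance to reclaim its own buffers before the page allocator punishes the whole system with retry loops or the OOM killer.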