Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
 drivers/gpu/drm/i915/i915_gem.c | 145
 1 file changed, 99 insertions(+), 46 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2748609f05b..ec8a0d7ffa3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -277,7 +277,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
         mutex_lock(&dev->struct_mutex);
 
-        ret = i915_gem_object_get_pages(obj);
+        ret = i915_gem_object_get_pages(obj, 0);
         if (ret != 0)
                 goto fail_unlock;
 
@@ -321,40 +321,24 @@ fail_unlock:
         return ret;
 }
 
-static inline gfp_t
-i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
-{
-        return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
-}
-
-static inline void
-i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
-{
-        mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
-}
-
 static int
 i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
 {
         int ret;
 
-        ret = i915_gem_object_get_pages(obj);
+        ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
 
         /* If we've insufficient memory to map in the pages, attempt
          * to make some space by throwing out some old buffers.
          */
         if (ret == -ENOMEM) {
                 struct drm_device *dev = obj->dev;
-                gfp_t gfp;
 
                 ret = i915_gem_evict_something(dev, obj->size);
                 if (ret)
                         return ret;
 
-                gfp = i915_gem_object_get_page_gfp_mask(obj);
-                i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
-                ret = i915_gem_object_get_pages(obj);
-                i915_gem_object_set_page_gfp_mask (obj, gfp);
+                ret = i915_gem_object_get_pages(obj, 0);
         }
 
         return ret;
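
The hunk above replaces the old trick of temporarily clearing __GFP_NORETRY on the whole mapping with an explicit gfpmask argument: the first attempt is made with __GFP_NORETRY | __GFP_NOWARN, and only after evicting something is a full-effort attempt made. A minimal user-space sketch of that try/evict/retry pattern follows; try_get_pages() and evict_something() are hypothetical stand-ins for i915_gem_object_get_pages() and i915_gem_evict_something(), not part of the patch.

/*
 * Sketch only: the "noretry" flag plays the role of
 * __GFP_NORETRY | __GFP_NOWARN in the driver code.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int try_get_pages(size_t size, int noretry)
{
        void *p;

        /* Pretend the cheap, no-reclaim attempt fails; a real allocator
         * would consult the VM before giving up. */
        if (noretry)
                return -ENOMEM;

        p = malloc(size);
        if (p == NULL)
                return -ENOMEM;
        free(p);
        return 0;
}

static int evict_something(size_t size)
{
        /* Stand-in for throwing out old buffers to make room. */
        printf("evicting at least %zu bytes\n", size);
        return 0;
}

static int get_pages_or_evict(size_t size)
{
        int ret = try_get_pages(size, 1);       /* first pass: don't try hard */

        if (ret == -ENOMEM) {
                ret = evict_something(size);
                if (ret)
                        return ret;
                ret = try_get_pages(size, 0);   /* second pass: full effort */
        }
        return ret;
}

int main(void)
{
        return get_pages_or_evict(4096) ? EXIT_FAILURE : EXIT_SUCCESS;
}

The same two-pass idea reappears below in i915_gem_object_bind_to_gtt(), where gfpmask starts as __GFP_NORETRY | __GFP_NOWARN and is dropped to 0 before the goto search_free retry.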
@@ -790,7 +774,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
         mutex_lock(&dev->struct_mutex);
 
-        ret = i915_gem_object_get_pages(obj);
+        ret = i915_gem_object_get_pages(obj, 0);
         if (ret != 0)
                 goto fail_unlock;
 
@@ -1568,6 +1552,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
         else
                 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 
+        BUG_ON(!list_empty(&obj_priv->gpu_write_list));
+
         obj_priv->last_rendering_seqno = 0;
         if (obj_priv->active) {
                 obj_priv->active = 0;
@@ -1638,7 +1624,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
                 struct drm_i915_gem_object *obj_priv, *next;
 
                 list_for_each_entry_safe(obj_priv, next,
-                                         &dev_priv->mm.flushing_list, list) {
+                                         &dev_priv->mm.gpu_write_list,
+                                         gpu_write_list) {
                         struct drm_gem_object *obj = obj_priv->obj;
 
                         if ((obj->write_domain & flush_domains) ==
@@ -1646,6 +1633,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
                                 uint32_t old_write_domain = obj->write_domain;
 
                                 obj->write_domain = 0;
+                                list_del_init(&obj_priv->gpu_write_list);
                                 i915_gem_object_move_to_active(obj, seqno);
 
                                 trace_i915_gem_object_change_domain(obj,
@@ -2100,8 +2088,8 @@ static int
 i915_gem_evict_everything(struct drm_device *dev)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
-        uint32_t seqno;
         int ret;
+        uint32_t seqno;
         bool lists_empty;
 
         spin_lock(&dev_priv->mm.active_list_lock);
@@ -2123,6 +2111,8 @@ i915_gem_evict_everything(struct drm_device *dev)
         if (ret)
                 return ret;
 
+        BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
         ret = i915_gem_evict_from_inactive_list(dev);
         if (ret)
                 return ret;
@@ -2230,7 +2220,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 }
 
 int
-i915_gem_object_get_pages(struct drm_gem_object *obj)
+i915_gem_object_get_pages(struct drm_gem_object *obj,
+                          gfp_t gfpmask)
 {
         struct drm_i915_gem_object *obj_priv = obj->driver_private;
         int page_count, i;
@@ -2256,7 +2247,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
         inode = obj->filp->f_path.dentry->d_inode;
         mapping = inode->i_mapping;
         for (i = 0; i < page_count; i++) {
-                page = read_mapping_page(mapping, i, NULL);
+                page = read_cache_page_gfp(mapping, i,
+                                           mapping_gfp_mask (mapping) |
+                                           __GFP_COLD |
+                                           gfpmask);
                 if (IS_ERR(page)) {
                         ret = PTR_ERR(page);
                         i915_gem_object_put_pages(obj);
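
With read_cache_page_gfp() the caller-supplied gfpmask is simply OR'd onto the mapping's default mask for this one lookup, instead of rewriting the mapping's gfp mask around the call as the removed helpers did. A small illustration of that composition follows; the flag values are made up for the example, only the OR-composition mirrors the hunk above.

#include <stdio.h>

typedef unsigned int gfp_t;

#define X_GFP_HIGHUSER 0x01u /* stand-in for mapping_gfp_mask(mapping) */
#define X_GFP_COLD     0x02u /* stand-in for __GFP_COLD */
#define X_GFP_NORETRY  0x04u /* stand-in for __GFP_NORETRY */
#define X_GFP_NOWARN   0x08u /* stand-in for __GFP_NOWARN */

/* Per-call flags are OR'd in for this lookup only, leaving the mapping's
 * own mask untouched. */
static gfp_t page_gfp(gfp_t mapping_mask, gfp_t gfpmask)
{
        return mapping_mask | X_GFP_COLD | gfpmask;
}

int main(void)
{
        printf("first pass: %#x\n",
               page_gfp(X_GFP_HIGHUSER, X_GFP_NORETRY | X_GFP_NOWARN));
        printf("retry pass: %#x\n", page_gfp(X_GFP_HIGHUSER, 0));
        return 0;
}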
@@ -2579,7 +2573,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
         drm_i915_private_t *dev_priv = dev->dev_private;
         struct drm_i915_gem_object *obj_priv = obj->driver_private;
         struct drm_mm_node *free_space;
-        bool retry_alloc = false;
+        gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
         int ret;
 
         if (obj_priv->madv != I915_MADV_WILLNEED) {
@@ -2623,15 +2617,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
         DRM_INFO("Binding object of size %zd at 0x%08x\n",
                  obj->size, obj_priv->gtt_offset);
 #endif
-        if (retry_alloc) {
-                i915_gem_object_set_page_gfp_mask (obj,
-                                                   i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
-        }
-        ret = i915_gem_object_get_pages(obj);
-        if (retry_alloc) {
-                i915_gem_object_set_page_gfp_mask (obj,
-                                                   i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
-        }
+        ret = i915_gem_object_get_pages(obj, gfpmask);
         if (ret) {
                 drm_mm_put_block(obj_priv->gtt_space);
                 obj_priv->gtt_space = NULL;
@@ -2641,9 +2627,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
                 ret = i915_gem_evict_something(dev, obj->size);
                 if (ret) {
                         /* now try to shrink everyone else */
-                        if (! retry_alloc) {
-                                retry_alloc = true;
+                        if (gfpmask) {
+                                gfpmask = 0;
                                 goto search_free;
                         }
 
                         return ret;
@@ -2721,7 +2707,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
         old_write_domain = obj->write_domain;
         i915_gem_flush(dev, 0, obj->write_domain);
         seqno = i915_add_request(dev, NULL, obj->write_domain);
-        obj->write_domain = 0;
+        BUG_ON(obj->write_domain);
         i915_gem_object_move_to_active(obj, seqno);
 
         trace_i915_gem_object_change_domain(obj,
@@ -2837,6 +2823,57 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
         return 0;
 }
 
+/*
+ * Prepare buffer for display plane. Use uninterruptible for possible flush
+ * wait, as in modesetting process we're not supposed to be interrupted.
+ */
+int
+i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
+{
+        struct drm_device *dev = obj->dev;
+        struct drm_i915_gem_object *obj_priv = obj->driver_private;
+        uint32_t old_write_domain, old_read_domains;
+        int ret;
+
+        /* Not valid to be called on unbound objects. */
+        if (obj_priv->gtt_space == NULL)
+                return -EINVAL;
+
+        i915_gem_object_flush_gpu_write_domain(obj);
+
+        /* Wait on any GPU rendering and flushing to occur. */
+        if (obj_priv->active) {
+#if WATCH_BUF
+                DRM_INFO("%s: object %p wait for seqno %08x\n",
+                         __func__, obj, obj_priv->last_rendering_seqno);
+#endif
+                ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
+                if (ret != 0)
+                        return ret;
+        }
+
+        old_write_domain = obj->write_domain;
+        old_read_domains = obj->read_domains;
+
+        obj->read_domains &= I915_GEM_DOMAIN_GTT;
+
+        i915_gem_object_flush_cpu_write_domain(obj);
+
+        /* It should now be out of any other write domains, and we can update
+         * the domain values for our changes.
+         */
+        BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+        obj->read_domains |= I915_GEM_DOMAIN_GTT;
+        obj->write_domain = I915_GEM_DOMAIN_GTT;
+        obj_priv->dirty = 1;
+
+        trace_i915_gem_object_change_domain(obj,
+                                            old_read_domains,
+                                            old_write_domain);
+
+        return 0;
+}
+
 /**
  * Moves a single object to the CPU read, and possibly write domain.
  *
@@ -3533,6 +3570,9 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
         uint32_t reloc_count = 0, i;
         int ret = 0;
 
+        if (relocs == NULL)
+                return 0;
+
         for (i = 0; i < buffer_count; i++) {
                 struct drm_i915_gem_relocation_entry __user *user_relocs;
                 int unwritten;
@@ -3622,7 +3662,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         struct drm_gem_object *batch_obj;
         struct drm_i915_gem_object *obj_priv;
         struct drm_clip_rect *cliprects = NULL;
-        struct drm_i915_gem_relocation_entry *relocs;
+        struct drm_i915_gem_relocation_entry *relocs = NULL;
         int ret = 0, ret2, i, pinned = 0;
         uint64_t exec_offset;
         uint32_t seqno, flush_domains, reloc_index;
@@ -3648,8 +3688,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         if (args->num_cliprects != 0) {
                 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
                                     GFP_KERNEL);
-                if (cliprects == NULL)
+                if (cliprects == NULL) {
+                        ret = -ENOMEM;
                         goto pre_mutex_err;
+                }
 
                 ret = copy_from_user(cliprects,
                                      (struct drm_clip_rect __user *)
@@ -3691,6 +3733,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                 if (object_list[i] == NULL) {
                         DRM_ERROR("Invalid object handle %d at index %d\n",
                                    exec_list[i].handle, i);
+                        /* prevent error path from reading uninitialized data */
+                        args->buffer_count = i + 1;
                         ret = -EBADF;
                         goto err;
                 }
@@ -3699,6 +3743,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                 if (obj_priv->in_execbuffer) {
                         DRM_ERROR("Object %p appears more than once in object list\n",
                                    object_list[i]);
+                        /* prevent error path from reading uninitialized data */
+                        args->buffer_count = i + 1;
                         ret = -EBADF;
                         goto err;
                 }
@@ -3812,16 +3858,23 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                 i915_gem_flush(dev,
                                dev->invalidate_domains,
                                dev->flush_domains);
-                if (dev->flush_domains)
+                if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
                         (void)i915_add_request(dev, file_priv,
                                                dev->flush_domains);
         }
 
         for (i = 0; i < args->buffer_count; i++) {
                 struct drm_gem_object *obj = object_list[i];
+                struct drm_i915_gem_object *obj_priv = obj->driver_private;
                 uint32_t old_write_domain = obj->write_domain;
 
                 obj->write_domain = obj->pending_write_domain;
+                if (obj->write_domain)
+                        list_move_tail(&obj_priv->gpu_write_list,
+                                       &dev_priv->mm.gpu_write_list);
+                else
+                        list_del_init(&obj_priv->gpu_write_list);
+
                 trace_i915_gem_object_change_domain(obj,
                                                     obj->read_domains,
                                                     old_write_domain);
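
Here each buffer is put on, or taken off, the device-wide gpu_write_list in lockstep with its pending write domain; i915_add_request() above then walks that list instead of the flushing_list. The sketch below mimics that bookkeeping in user space with a minimal intrusive list modelled on the kernel's <linux/list.h>; all names and the domain value are illustrative, not the driver's.

#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void list_del_init(struct list_head *e)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
        INIT_LIST_HEAD(e);
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
        e->prev = h->prev;
        e->next = h;
        h->prev->next = e;
        h->prev = e;
}

static void list_move_tail(struct list_head *e, struct list_head *h)
{
        list_del_init(e);
        list_add_tail(e, h);
}

struct obj {
        unsigned int write_domain;
        struct list_head gpu_write_list;
};

/* Mirror of the execbuffer hunk: on the list while a GPU write is pending. */
static void update_gpu_write_list(struct obj *o, struct list_head *gpu_write_list,
                                  unsigned int pending_write_domain)
{
        o->write_domain = pending_write_domain;
        if (o->write_domain)
                list_move_tail(&o->gpu_write_list, gpu_write_list);
        else
                list_del_init(&o->gpu_write_list);
}

int main(void)
{
        struct list_head gpu_write_list;
        struct obj o;

        INIT_LIST_HEAD(&gpu_write_list);
        INIT_LIST_HEAD(&o.gpu_write_list);

        update_gpu_write_list(&o, &gpu_write_list, 0x40 /* pretend GPU domain */);
        printf("on list: %d\n", gpu_write_list.next == &o.gpu_write_list);

        update_gpu_write_list(&o, &gpu_write_list, 0);
        printf("on list: %d\n", gpu_write_list.next == &o.gpu_write_list);
        return 0;
}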
@@ -3895,6 +3948,7 @@ err:
 
         mutex_unlock(&dev->struct_mutex);
 
+pre_mutex_err:
         /* Copy the updated relocations out regardless of current error
          * state. Failure to update the relocs would mean that the next
          * time userland calls execbuf, it would do so with presumed offset
@@ -3909,7 +3963,6 @@ err:
                         ret = ret2;
         }
 
-pre_mutex_err:
         drm_free_large(object_list);
         kfree(cliprects);
 
@@ -4000,8 +4053,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
4000 "back to user (%d)\n", 4053 "back to user (%d)\n",
4001 args->buffer_count, ret); 4054 args->buffer_count, ret);
4002 } 4055 }
4003 } else {
4004 DRM_ERROR("i915_gem_do_execbuffer returns %d\n", ret);
4005 } 4056 }
4006 4057
4007 drm_free_large(exec_list); 4058 drm_free_large(exec_list);
@@ -4334,6 +4385,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
         obj_priv->obj = obj;
         obj_priv->fence_reg = I915_FENCE_REG_NONE;
         INIT_LIST_HEAD(&obj_priv->list);
+        INIT_LIST_HEAD(&obj_priv->gpu_write_list);
         INIT_LIST_HEAD(&obj_priv->fence_list);
         obj_priv->madv = I915_MADV_WILLNEED;
 
@@ -4785,6 +4837,7 @@ i915_gem_load(struct drm_device *dev)
         spin_lock_init(&dev_priv->mm.active_list_lock);
         INIT_LIST_HEAD(&dev_priv->mm.active_list);
         INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+        INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
         INIT_LIST_HEAD(&dev_priv->mm.request_list);
         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4897,7 +4950,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
         if (!obj_priv->phys_obj)
                 return;
 
-        ret = i915_gem_object_get_pages(obj);
+        ret = i915_gem_object_get_pages(obj, 0);
         if (ret)
                 goto out;
 
@@ -4955,7 +5008,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
         obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
         obj_priv->phys_obj->cur_obj = obj;
 
-        ret = i915_gem_object_get_pages(obj);
+        ret = i915_gem_object_get_pages(obj, 0);
         if (ret) {
                 DRM_ERROR("failed to get page list\n");
                 goto out;