Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	58
1 file changed, 26 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 12d32579b951..5c0d1247f453 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,6 +31,7 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/pci.h>
@@ -359,8 +360,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
 		if ((page_offset + remain) > PAGE_SIZE)
 			page_length = PAGE_SIZE - page_offset;
 
-		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 		if (IS_ERR(page))
 			return PTR_ERR(page);
 
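Note: the GFP arguments drop out above because shmem_read_mapping_page() picks up the gfp mask stored on the mapping itself, which i915_gem_alloc_object() now sets once at creation (see the hunk at -3575 below). As a sketch of what <linux/shmem_fs.h> provides here, from memory and not verbatim:

    /* The no-gfp variant defers to the mask stored on the mapping. */
    static inline struct page *shmem_read_mapping_page(
    			struct address_space *mapping, pgoff_t index)
    {
    	return shmem_read_mapping_page_gfp(mapping, index,
    					   mapping_gfp_mask(mapping));
    }

The same substitution repeats in the pread_slow, pwrite_fast, pwrite_slow and phys-object hunks below.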
@@ -463,10 +463,11 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
 		if ((data_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - data_page_offset;
 
-		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
+		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
+		if (IS_ERR(page)) {
+			ret = PTR_ERR(page);
+			goto out;
+		}
 
 		if (do_bit17_swizzling) {
 			slow_shmem_bit17_copy(page,
@@ -795,8 +796,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
 		if ((page_offset + remain) > PAGE_SIZE)
 			page_length = PAGE_SIZE - page_offset;
 
-		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 		if (IS_ERR(page))
 			return PTR_ERR(page);
 
@@ -905,8 +905,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
 		if ((data_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - data_page_offset;
 
-		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 		if (IS_ERR(page)) {
 			ret = PTR_ERR(page);
 			goto out;
@@ -1217,11 +1216,11 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		ret = i915_gem_object_bind_to_gtt(obj, 0, true);
 		if (ret)
 			goto unlock;
-	}
 
-	ret = i915_gem_object_set_to_gtt_domain(obj, write);
-	if (ret)
-		goto unlock;
+		ret = i915_gem_object_set_to_gtt_domain(obj, write);
+		if (ret)
+			goto unlock;
+	}
 
 	if (obj->tiling_mode == I915_TILING_NONE)
 		ret = i915_gem_object_put_fence(obj);
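Note: this hunk narrows the set-to-GTT-domain step to the branch that has just bound the object, instead of running it on every fault. Assuming the enclosing conditional is the usual !obj->gtt_space check (it sits just above this hunk's context; treat it as an assumption here), the resulting flow is roughly:

    /* Sketch of the post-patch flow in i915_gem_fault(). */
    if (!obj->gtt_space) {
    	ret = i915_gem_object_bind_to_gtt(obj, 0, true);
    	if (ret)
    		goto unlock;
    
    	ret = i915_gem_object_set_to_gtt_domain(obj, write);
    	if (ret)
    		goto unlock;
    }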
@@ -1556,12 +1555,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
 
 	inode = obj->base.filp->f_path.dentry->d_inode;
 	mapping = inode->i_mapping;
+	gfpmask |= mapping_gfp_mask(mapping);
+
 	for (i = 0; i < page_count; i++) {
-		page = read_cache_page_gfp(mapping, i,
-					   GFP_HIGHUSER |
-					   __GFP_COLD |
-					   __GFP_RECLAIMABLE |
-					   gfpmask);
+		page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
 		if (IS_ERR(page))
 			goto err_pages;
 
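Note: whatever extra flags the caller passes in through gfpmask are now OR'd with the mask stored on the mapping, rather than with a hard-coded GFP_HIGHUSER | __GFP_COLD | __GFP_RECLAIMABLE set. The pagemap.h accessor this leans on is, paraphrasing that era's include/linux/pagemap.h (a sketch, not verbatim):

    /* The per-mapping gfp mask lives in the low bits of mapping->flags. */
    static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
    {
    	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
    }

mapping_set_gfp_mask() is the matching setter; the alloc_object hunk below uses it.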
@@ -1699,13 +1696,10 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 	/* Our goal here is to return as much of the memory as
 	 * is possible back to the system as we are called from OOM.
 	 * To do this we must instruct the shmfs to drop all of its
-	 * backing pages, *now*. Here we mirror the actions taken
-	 * when by shmem_delete_inode() to release the backing store.
+	 * backing pages, *now*.
 	 */
 	inode = obj->base.filp->f_path.dentry->d_inode;
-	truncate_inode_pages(inode->i_mapping, 0);
-	if (inode->i_op->truncate_range)
-		inode->i_op->truncate_range(inode, 0, (loff_t)-1);
+	shmem_truncate_range(inode, 0, (loff_t)-1);
 
 	obj->madv = __I915_MADV_PURGED;
 }
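Note: shmem_truncate_range() replaces the open-coded pair with one exported tmpfs helper, so the driver no longer depends on the inode exposing a truncate_range inode operation. Passing lend = (loff_t)-1 truncates to end of file. A minimal usage sketch, with the prototype as I recall it from <linux/shmem_fs.h>:

    /* void shmem_truncate_range(struct inode *inode,
     *                           loff_t lstart, loff_t lend); */
    shmem_truncate_range(inode, 0, (loff_t)-1);	/* drop every backing page */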
@@ -2078,8 +2072,8 @@ i915_wait_request(struct intel_ring_buffer *ring,
 		if (!ier) {
 			DRM_ERROR("something (likely vbetool) disabled "
 				  "interrupts, re-enabling\n");
-			i915_driver_irq_preinstall(ring->dev);
-			i915_driver_irq_postinstall(ring->dev);
+			ring->dev->driver->irq_preinstall(ring->dev);
+			ring->dev->driver->irq_postinstall(ring->dev);
 		}
 
 		trace_i915_gem_request_wait_begin(ring, seqno);
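Note: calling through the drm_driver vtable rather than the i915 functions by name means this recovery path invokes whichever irq handlers the driver actually registered, so it stays correct if the KMS and legacy setups install different ones. The relevant hooks, paraphrased from that era's struct drm_driver (a sketch from memory, not verbatim):

    struct drm_driver {
    	/* ... */
    	void (*irq_preinstall)(struct drm_device *dev);
    	int (*irq_postinstall)(struct drm_device *dev);
    	/* ... */
    };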
@@ -2924,8 +2918,6 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
 	 */
 	wmb();
 
-	i915_gem_release_mmap(obj);
-
 	old_write_domain = obj->base.write_domain;
 	obj->base.write_domain = 0;
 
@@ -3565,6 +3557,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
+	struct address_space *mapping;
 
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 	if (obj == NULL)
@@ -3575,6 +3568,9 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 		return NULL;
 	}
 
+	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+
 	i915_gem_info_add_obj(dev_priv, size);
 
 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
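Note: storing the mask once at object creation is what lets every later flag-less shmem_read_mapping_page() call in this file keep the old allocation behaviour. Conceptually (a sketch; the index n is hypothetical):

    mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
    /* ... later, at pread/pwrite/fault time ... */
    page = shmem_read_mapping_page(mapping, n);
    /* behaves like:
     * page = shmem_read_mapping_page_gfp(mapping, n,
     *				GFP_HIGHUSER | __GFP_RECLAIMABLE); */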
@@ -3950,8 +3946,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 
 	page_count = obj->base.size / PAGE_SIZE;
 	for (i = 0; i < page_count; i++) {
-		struct page *page = read_cache_page_gfp(mapping, i,
-							GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		struct page *page = shmem_read_mapping_page(mapping, i);
 		if (!IS_ERR(page)) {
 			char *dst = kmap_atomic(page);
 			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
@@ -4012,8 +4007,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
 		struct page *page;
 		char *dst, *src;
 
-		page = read_cache_page_gfp(mapping, i,
-					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		page = shmem_read_mapping_page(mapping, i);
 		if (IS_ERR(page))
 			return PTR_ERR(page);
 