| author | Keith Packard <keithp@keithp.com> | 2011-06-29 13:34:54 -0400 |
|---|---|---|
| committer | Keith Packard <keithp@keithp.com> | 2011-06-29 13:34:54 -0400 |
| commit | 8eb2c0ee67f4853c0e433394f65ef8143a748c80 (patch) | |
| tree | ea74ea0878b2f07eb70f2d5b42be5ec42f3bdb5f /drivers/gpu/drm/i915/i915_gem.c | |
| parent | 3e0dc6b01f5301d63046f6deddde2c7f5c57d67a (diff) | |
| parent | 0d72c6fcb5cd215eeab824fac216ea42c5b574aa (diff) | |
Merge branch 'drm-intel-fixes' into drm-intel-next
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
| -rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 38 |

1 file changed, 16 insertions(+), 22 deletions(-)
```diff
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8b670e7ee404..e9d1d5c3a696 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,6 +31,7 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/pci.h>
```
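The newly added <linux/shmem_fs.h> include declares the helpers this patch migrates to. For reference (a sketch of the header as of this series, not part of the patch itself): the plain shmem_read_mapping_page() is a thin wrapper that falls back to the gfp mask stored on the mapping, which is why most call sites below can drop their explicit flags.

```c
/* Sketch of the <linux/shmem_fs.h> helpers used below; the non-_gfp
 * variant simply reuses whatever gfp mask the mapping carries. */
extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
						pgoff_t index, gfp_t gfp_mask);

static inline struct page *shmem_read_mapping_page(
				struct address_space *mapping, pgoff_t index)
{
	return shmem_read_mapping_page_gfp(mapping, index,
					   mapping_gfp_mask(mapping));
}
```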
```diff
@@ -359,8 +360,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
 	if ((page_offset + remain) > PAGE_SIZE)
 		page_length = PAGE_SIZE - page_offset;
 
-	page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-				   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+	page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 	if (IS_ERR(page))
 		return PTR_ERR(page);
 
```
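The same substitution, read_cache_page_gfp() with hard-coded GFP_HIGHUSER | __GFP_RECLAIMABLE becoming shmem_read_mapping_page(), repeats in the next three hunks for the slow pread path and both pwrite paths. A minimal sketch of the resulting fast-path pattern; read_one_page() is a hypothetical name, not a function in this file:

```c
/* Hypothetical helper sketching the fast-path pattern after this
 * change: look up the shmem page, copy to userspace, drop the ref. */
static int read_one_page(struct address_space *mapping, loff_t offset,
			 char __user *user_data,
			 int page_offset, int page_length)
{
	struct page *page;
	char *vaddr;
	unsigned long left;

	/* gfp flags now come from the mapping itself (set at creation) */
	page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
	if (IS_ERR(page))
		return PTR_ERR(page);

	vaddr = kmap_atomic(page);
	left = __copy_to_user_inatomic(user_data, vaddr + page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	page_cache_release(page);
	return left ? -EFAULT : 0;
}
```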
```diff
@@ -463,8 +463,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
 	if ((data_page_offset + page_length) > PAGE_SIZE)
 		page_length = PAGE_SIZE - data_page_offset;
 
-	page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-				   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+	page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 	if (IS_ERR(page)) {
 		ret = PTR_ERR(page);
 		goto out;
```
```diff
@@ -797,8 +796,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
 	if ((page_offset + remain) > PAGE_SIZE)
 		page_length = PAGE_SIZE - page_offset;
 
-	page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-				   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+	page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 	if (IS_ERR(page))
 		return PTR_ERR(page);
 
```
```diff
@@ -907,8 +905,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
 	if ((data_page_offset + page_length) > PAGE_SIZE)
 		page_length = PAGE_SIZE - data_page_offset;
 
-	page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-				   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+	page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 	if (IS_ERR(page)) {
 		ret = PTR_ERR(page);
 		goto out;
```
```diff
@@ -1558,12 +1555,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
 
 	inode = obj->base.filp->f_path.dentry->d_inode;
 	mapping = inode->i_mapping;
+	gfpmask |= mapping_gfp_mask(mapping);
+
 	for (i = 0; i < page_count; i++) {
-		page = read_cache_page_gfp(mapping, i,
-					   GFP_HIGHUSER |
-					   __GFP_COLD |
-					   __GFP_RECLAIMABLE |
-					   gfpmask);
+		page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
 		if (IS_ERR(page))
 			goto err_pages;
 
```
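Unlike the pread/pwrite hunks, this path must honour a caller-supplied gfpmask, so instead of dropping the flags it ORs the mapping's default mask into gfpmask once and switches to the _gfp variant of the helper. A self-contained sketch of the idea, with fill_pages() and extra_gfp as illustrative names:

```c
/* Illustrative sketch (not the driver function): populate a page
 * array from a shmem mapping, folding caller flags into the
 * mapping's default gfp mask, and unwinding on failure. */
static int fill_pages(struct address_space *mapping, struct page **pages,
		      int page_count, gfp_t extra_gfp)
{
	gfp_t gfp = extra_gfp | mapping_gfp_mask(mapping);
	int i;

	for (i = 0; i < page_count; i++) {
		struct page *page =
			shmem_read_mapping_page_gfp(mapping, i, gfp);

		if (IS_ERR(page)) {
			while (i--)
				page_cache_release(pages[i]);
			return PTR_ERR(page);
		}
		pages[i] = page;
	}
	return 0;
}
```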
```diff
@@ -1701,13 +1696,10 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 	/* Our goal here is to return as much of the memory as
 	 * is possible back to the system as we are called from OOM.
 	 * To do this we must instruct the shmfs to drop all of its
-	 * backing pages, *now*. Here we mirror the actions taken
-	 * when by shmem_delete_inode() to release the backing store.
+	 * backing pages, *now*.
 	 */
 	inode = obj->base.filp->f_path.dentry->d_inode;
-	truncate_inode_pages(inode->i_mapping, 0);
-	if (inode->i_op->truncate_range)
-		inode->i_op->truncate_range(inode, 0, (loff_t)-1);
+	shmem_truncate_range(inode, 0, (loff_t)-1);
 
 	obj->madv = __I915_MADV_PURGED;
 }
```
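The purge previously open-coded what shmem itself does on inode deletion: truncate_inode_pages() for the page cache, plus an optional i_op->truncate_range() call to also drop swap-backed copies. The exported shmem_truncate_range() covers both, which is why the comment loses its sentence about mirroring shmem_delete_inode(). Sketched as a standalone helper with a hypothetical name:

```c
/* Hypothetical helper mirroring the purge path above: drop every
 * backing page ((loff_t)-1 means "to end of file") and mark the
 * object purged so its contents are never faulted back in. */
static void purge_backing_store(struct drm_i915_gem_object *obj)
{
	struct inode *inode = obj->base.filp->f_path.dentry->d_inode;

	shmem_truncate_range(inode, 0, (loff_t)-1);
	obj->madv = __I915_MADV_PURGED;
}
```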
```diff
@@ -3688,6 +3680,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
+	struct address_space *mapping;
 
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 	if (obj == NULL)
```
```diff
@@ -3698,6 +3691,9 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 		return NULL;
 	}
 
+	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+
 	i915_gem_info_add_obj(dev_priv, size);
 
 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
```
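This hunk is the other half of the cleanup: the gfp policy is stamped onto the object's shmem mapping once, at creation time, and every later shmem_read_mapping_page() call inherits it via mapping_gfp_mask() (see the wrapper sketch near the top). In isolation, under a hypothetical helper name:

```c
/* Sketch of the creation-time setup added here: store the allocation
 * policy on the mapping so later page lookups need no explicit flags.
 * GFP_HIGHUSER allows highmem user pages; __GFP_RECLAIMABLE groups
 * them with other reclaimable allocations to limit fragmentation. */
static void set_obj_gfp_policy(struct drm_gem_object *gem)
{
	struct address_space *mapping =
		gem->filp->f_path.dentry->d_inode->i_mapping;

	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
}
```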
```diff
@@ -4089,8 +4085,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 
 	page_count = obj->base.size / PAGE_SIZE;
 	for (i = 0; i < page_count; i++) {
-		struct page *page = read_cache_page_gfp(mapping, i,
-							GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		struct page *page = shmem_read_mapping_page(mapping, i);
 		if (!IS_ERR(page)) {
 			char *dst = kmap_atomic(page);
 			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
```
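Note the error handling in the detach path: the copy-back runs under if (!IS_ERR(page)) because i915_gem_detach_phys_object() returns void, so a failed lookup can only be skipped. A sketch of one loop iteration, with copy_back_page() as a hypothetical name:

```c
/* Hypothetical per-page step of the detach copy-back loop: read the
 * shmem page, copy the phys buffer's data into it, dirty it, and
 * drop the reference; lookup failures are silently skipped. */
static void copy_back_page(struct address_space *mapping,
			   const char *vaddr, int i)
{
	struct page *page = shmem_read_mapping_page(mapping, i);
	char *dst;

	if (IS_ERR(page))
		return;

	dst = kmap_atomic(page);
	memcpy(dst, vaddr + i * PAGE_SIZE, PAGE_SIZE);
	kunmap_atomic(dst);

	set_page_dirty(page);
	page_cache_release(page);
}
```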
```diff
@@ -4151,8 +4146,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
 		struct page *page;
 		char *dst, *src;
 
-		page = read_cache_page_gfp(mapping, i,
-					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		page = shmem_read_mapping_page(mapping, i);
 		if (IS_ERR(page))
 			return PTR_ERR(page);
 
```