author    Hugh Dickins <hughd@google.com>    2011-06-27 19:18:18 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2011-06-27 21:00:13 -0400
commit    5949eac4d9b5bf936c12cb7ec3a09084c1326834 (patch)
tree      1cb391ffd8ebab379e4c37953923baeeff9d3b65
parent    3142b651ad2232cf0e375c291ee4b893c8559df5 (diff)
drm/i915: use shmem_read_mapping_page
Soon tmpfs will stop supporting ->readpage and read_cache_page_gfp(): once
"tmpfs: add shmem_read_mapping_page_gfp" has been applied, this patch can be
applied to ease the transition.

Make i915_gem_object_get_pages_gtt() use shmem_read_mapping_page_gfp() in
the one place it's needed; elsewhere use shmem_read_mapping_page(), with the
mapping's gfp_mask properly initialized.

Forget about __GFP_COLD: since tmpfs initializes its pages with memset,
asking for a cold page is counter-productive.

Include linux/shmem_fs.h also in drm_gem.c: with shmem_file_setup() now
declared there too, we shall remove the prototype from linux/mm.h later.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Keith Packard <keithp@keithp.com>
Cc: Dave Airlie <airlied@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
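[Editor's note: the helpers this patch switches to come from the companion
patch "tmpfs: add shmem_read_mapping_page_gfp". A minimal sketch of the plain
variant as declared in linux/shmem_fs.h (paraphrased from that patch, not a
verbatim copy) shows why initializing the mapping's gfp_mask is enough for
the simple call sites:

	/* Read a page into a shmem mapping, using the mapping's own
	 * gfp_mask -- the mask that i915_gem_alloc_object() below now
	 * initializes with mapping_set_gfp_mask(). */
	static inline struct page *shmem_read_mapping_page(
				struct address_space *mapping, pgoff_t index)
	{
		return shmem_read_mapping_page_gfp(mapping, index,
						   mapping_gfp_mask(mapping));
	}
]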
-rw-r--r--  drivers/gpu/drm/drm_gem.c       |  1 +
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 31 ++++++++++++++-----------------
2 files changed, 15 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 74e4ff578017..4012fe423460 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -34,6 +34,7 @@
 #include <linux/module.h>
 #include <linux/mman.h>
 #include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
 #include "drmP.h"
 
 /** @file drm_gem.c
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c6389de53161..fa560ceba667 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,6 +31,7 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/pci.h>
@@ -359,8 +360,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
 	if ((page_offset + remain) > PAGE_SIZE)
 		page_length = PAGE_SIZE - page_offset;
 
-	page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-				   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+	page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 	if (IS_ERR(page))
 		return PTR_ERR(page);
 
@@ -463,8 +463,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
 	if ((data_page_offset + page_length) > PAGE_SIZE)
 		page_length = PAGE_SIZE - data_page_offset;
 
-	page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-				   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+	page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 	if (IS_ERR(page)) {
 		ret = PTR_ERR(page);
 		goto out;
@@ -797,8 +796,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
 	if ((page_offset + remain) > PAGE_SIZE)
 		page_length = PAGE_SIZE - page_offset;
 
-	page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-				   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+	page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 	if (IS_ERR(page))
 		return PTR_ERR(page);
 
@@ -907,8 +905,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
 	if ((data_page_offset + page_length) > PAGE_SIZE)
 		page_length = PAGE_SIZE - data_page_offset;
 
-	page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-				   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+	page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 	if (IS_ERR(page)) {
 		ret = PTR_ERR(page);
 		goto out;
@@ -1558,12 +1555,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
 
 	inode = obj->base.filp->f_path.dentry->d_inode;
 	mapping = inode->i_mapping;
+	gfpmask |= mapping_gfp_mask(mapping);
+
 	for (i = 0; i < page_count; i++) {
-		page = read_cache_page_gfp(mapping, i,
-					   GFP_HIGHUSER |
-					   __GFP_COLD |
-					   __GFP_RECLAIMABLE |
-					   gfpmask);
+		page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
 		if (IS_ERR(page))
 			goto err_pages;
 
@@ -3565,6 +3560,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
+	struct address_space *mapping;
 
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 	if (obj == NULL)
@@ -3575,6 +3571,9 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 		return NULL;
 	}
 
+	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+
 	i915_gem_info_add_obj(dev_priv, size);
 
 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
@@ -3950,8 +3949,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 
 	page_count = obj->base.size / PAGE_SIZE;
 	for (i = 0; i < page_count; i++) {
-		struct page *page = read_cache_page_gfp(mapping, i,
-							GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		struct page *page = shmem_read_mapping_page(mapping, i);
 		if (!IS_ERR(page)) {
 			char *dst = kmap_atomic(page);
 			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
@@ -4012,8 +4010,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
 		struct page *page;
 		char *dst, *src;
 
-		page = read_cache_page_gfp(mapping, i,
-					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		page = shmem_read_mapping_page(mapping, i);
 		if (IS_ERR(page))
 			return PTR_ERR(page);
 