about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/exynos/exynos_drm_gem.c
diff options
context:
space:
mode:
authorInki Dae <inki.dae@samsung.com>2012-06-07 02:59:48 -0400
committerInki Dae <inki.dae@samsung.com>2012-07-26 22:13:56 -0400
commitc62bc752f2d8cbaaa1fd15fa1bcdf10fb90568c0 (patch)
treefa8ce56ead2a0bbaffcff65343cfc85064ebddeb /drivers/gpu/drm/exynos/exynos_drm_gem.c
parentf91f2f331e0d0c640677abbc1a4fa98222ab725a (diff)
drm/exynos: use alloc_page() to allocate pages.
shmem_read_mapping_page_gfp() first tries to allocate pages from the page cache, so if pages are allocated from the page cache, those pages could have valid cache lines. The CPU may then read garbage data from the cache once a GPU operation using the allocated pages has completed. So with this patch, a non-contiguous memory allocation request allocates pages from highmem through alloc_page() with GFP_HIGHUSER_MOVABLE. Signed-off-by: Inki Dae <inki.dae@samsung.com> Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Diffstat (limited to 'drivers/gpu/drm/exynos/exynos_drm_gem.c')
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c14
1 file changed, 3 insertions, 11 deletions
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index acb9f424eb60..47696bb1284e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -99,25 +99,17 @@ out:
99struct page **exynos_gem_get_pages(struct drm_gem_object *obj, 99struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
100 gfp_t gfpmask) 100 gfp_t gfpmask)
101{ 101{
102 struct inode *inode;
103 struct address_space *mapping;
104 struct page *p, **pages; 102 struct page *p, **pages;
105 int i, npages; 103 int i, npages;
106 104
107 /* This is the shared memory object that backs the GEM resource */
108 inode = obj->filp->f_path.dentry->d_inode;
109 mapping = inode->i_mapping;
110
111 npages = obj->size >> PAGE_SHIFT; 105 npages = obj->size >> PAGE_SHIFT;
112 106
113 pages = drm_malloc_ab(npages, sizeof(struct page *)); 107 pages = drm_malloc_ab(npages, sizeof(struct page *));
114 if (pages == NULL) 108 if (pages == NULL)
115 return ERR_PTR(-ENOMEM); 109 return ERR_PTR(-ENOMEM);
116 110
117 gfpmask |= mapping_gfp_mask(mapping);
118
119 for (i = 0; i < npages; i++) { 111 for (i = 0; i < npages; i++) {
120 p = shmem_read_mapping_page_gfp(mapping, i, gfpmask); 112 p = alloc_page(gfpmask);
121 if (IS_ERR(p)) 113 if (IS_ERR(p))
122 goto fail; 114 goto fail;
123 pages[i] = p; 115 pages[i] = p;
@@ -127,7 +119,7 @@ struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
127 119
128fail: 120fail:
129 while (i--) 121 while (i--)
130 page_cache_release(pages[i]); 122 __free_page(pages[i]);
131 123
132 drm_free_large(pages); 124 drm_free_large(pages);
133 return ERR_PTR(PTR_ERR(p)); 125 return ERR_PTR(PTR_ERR(p));
@@ -189,7 +181,7 @@ static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
189 return -EINVAL; 181 return -EINVAL;
190 } 182 }
191 183
192 pages = exynos_gem_get_pages(obj, GFP_KERNEL); 184 pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
193 if (IS_ERR(pages)) { 185 if (IS_ERR(pages)) {
194 DRM_ERROR("failed to get pages.\n"); 186 DRM_ERROR("failed to get pages.\n");
195 return PTR_ERR(pages); 187 return PTR_ERR(pages);