author    David Herrmann <dh.herrmann@gmail.com>  2014-05-25 06:59:47 -0400
committer David Herrmann <dh.herrmann@gmail.com>  2014-07-07 18:29:53 -0400
commit    0cdbe8ac696b5399327f972a1c91263c1a44f1d9 (patch)
tree      7ba5705585c5de981a11e41935cd66ad88ce285c /drivers/gpu
parent    ab5a60c3ee41ff22304e2bcf63c151aa2851df0c (diff)
drm/gem: remove misleading gfp parameter to get_pages()
drm_gem_get_pages() currently allows passing a 'gfp' parameter that is
combined with mapping_gfp_mask() and passed to shmem. Given that the default
mapping_gfp_mask() is GFP_HIGHUSER, it is _very_ unlikely that anyone will
ever make use of that parameter. In fact, all drivers currently pass
redundant flags or 0. This patch removes the 'gfp' parameter.

The only reason to keep it would be to remove flags like __GFP_WAIT. But in
its current form, the parameter can only be used to add flags. So to remove
__GFP_WAIT, you would have to drop it from the mapping_gfp_mask, which again
is stupid, as this mask is used by shmem-core for other allocations, too.

If any driver ever requires that parameter, we can introduce a new helper
that takes the raw 'gfp' parameter. The caller would be responsible for
combining it with mapping_gfp_mask() in a suitable way. The current
drm_gem_get_pages() helper would then simply use mapping_gfp_mask() and call
the new helper. This is what shmem_read_mapping_page{_gfp,}() does right now.

Moreover, the gfp-zone flag-usage is not obvious: if you pass a modified
zone, shmem-core will WARN() or even BUG(). In other words, the following
must hold for any 'gfp' passed to shmem_read_mapping_page_gfp():

    gfp_zone(mapping_gfp_mask(mapping)) == gfp_zone(gfp)

Add a comment to drm_gem_get_pages() explaining that constraint.

Signed-off-by: David Herrmann <dh.herrmann@gmail.com>
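As an illustration of the intended usage (a sketch, not part of this patch;
the foo_* name and the GFP choice are hypothetical, declarations come from
the drmP.h umbrella header of this era), a driver with special zone
constraints would now set its mask once on the shmem mapping right after
object initialization, instead of passing 'gfp' on every call:

	/* Hypothetical driver init path. Restrict the shmem mapping to the
	 * DMA32 zone once; drm_gem_get_pages() and shmem swap-in then honor
	 * it for all later allocations. The zone must stay stable afterwards,
	 * i.e. gfp_zone(mapping_gfp_mask(mapping)) may not change at runtime.
	 */
	static int foo_gem_init(struct drm_device *dev,
				struct drm_gem_object *obj, size_t size)
	{
		int ret;

		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			return ret;

		mapping_set_gfp_mask(file_inode(obj->filp)->i_mapping,
				     GFP_USER | __GFP_DMA32);
		return 0;
	}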
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/drm_gem.c          | 29
-rw-r--r--  drivers/gpu/drm/gma500/gtt.c       |  2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c      |  2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c |  2
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c      |  8
5 files changed, 27 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index f7d71190aad5..6adee4c2afc0 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -441,18 +441,31 @@ EXPORT_SYMBOL(drm_gem_create_mmap_offset);
  * drm_gem_get_pages - helper to allocate backing pages for a GEM object
  * from shmem
  * @obj: obj in question
- * @gfpmask: gfp mask of requested pages
+ *
+ * This reads the page-array of the shmem-backing storage of the given gem
+ * object. An array of pages is returned. If a page is not allocated or
+ * swapped-out, this will allocate/swap-in the required pages. Note that the
+ * whole object is covered by the page-array and pinned in memory.
+ *
+ * Use drm_gem_put_pages() to release the array and unpin all pages.
+ *
+ * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
+ * If you require other GFP-masks, you have to do those allocations yourself.
+ *
+ * Note that you are not allowed to change gfp-zones during runtime. That is,
+ * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
+ * set during initialization. If you have special zone constraints, set them
+ * after drm_gem_init_object() via mapping_set_gfp_mask(). shmem-core takes care
+ * to keep pages in the required zone during swap-in.
  */
-struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 {
-	struct inode *inode;
 	struct address_space *mapping;
 	struct page *p, **pages;
 	int i, npages;
 
 	/* This is the shared memory object that backs the GEM resource */
-	inode = file_inode(obj->filp);
-	mapping = inode->i_mapping;
+	mapping = file_inode(obj->filp)->i_mapping;
 
 	/* We already BUG_ON() for non-page-aligned sizes in
 	 * drm_gem_object_init(), so we should never hit this unless
@@ -466,10 +479,8 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
 	if (pages == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	gfpmask |= mapping_gfp_mask(mapping);
-
 	for (i = 0; i < npages; i++) {
-		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+		p = shmem_read_mapping_page(mapping, i);
 		if (IS_ERR(p))
 			goto fail;
 		pages[i] = p;
@@ -479,7 +490,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
 		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
 		 * so shmem can relocate pages during swapin if required.
 		 */
-		BUG_ON((gfpmask & __GFP_DMA32) &&
+		BUG_ON((mapping_gfp_mask(mapping) & __GFP_DMA32) &&
 				(page_to_pfn(p) >= 0x00100000UL));
 	}
 
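For reference, a minimal caller sketch against the new parameterless
signature (the foo_* names and the pages field are hypothetical, not from
this patch; drm_gem_get_pages()/drm_gem_put_pages() are the real helpers):

	struct foo_gem_object {
		struct drm_gem_object base;
		struct page **pages;
	};

	static int foo_pin_pages(struct foo_gem_object *bo)
	{
		/* Allocates/swaps in and pins the whole backing store. */
		struct page **pages = drm_gem_get_pages(&bo->base);

		if (IS_ERR(pages))
			return PTR_ERR(pages);
		bo->pages = pages;
		return 0;
	}

	static void foo_unpin_pages(struct foo_gem_object *bo)
	{
		/* dirty=true, accessed=true: mark pages before unpinning. */
		drm_gem_put_pages(&bo->base, bo->pages, true, true);
		bo->pages = NULL;
	}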
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 592d205a0089..ce015db59dc6 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -206,7 +206,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
 
 	WARN_ON(gt->pages);
 
-	pages = drm_gem_get_pages(&gt->gem, 0);
+	pages = drm_gem_get_pages(&gt->gem);
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index bb8026daebc9..6866879efa3b 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -73,7 +73,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
 		int npages = obj->size >> PAGE_SHIFT;
 
 		if (iommu_present(&platform_bus_type))
-			p = drm_gem_get_pages(obj, 0);
+			p = drm_gem_get_pages(obj);
 		else
 			p = get_pages_vram(obj, npages);
 
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 1331fd538398..7c68c8a18939 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -233,7 +233,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
 
 	WARN_ON(omap_obj->pages);
 
-	pages = drm_gem_get_pages(obj, GFP_KERNEL);
+	pages = drm_gem_get_pages(obj);
 	if (IS_ERR(pages)) {
 		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
 		return PTR_ERR(pages);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index c041cd73f399..8044f5fb7c49 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -107,14 +107,14 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 }
 
-static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
+static int udl_gem_get_pages(struct udl_gem_object *obj)
 {
 	struct page **pages;
 
 	if (obj->pages)
 		return 0;
 
-	pages = drm_gem_get_pages(&obj->base, gfpmask);
+	pages = drm_gem_get_pages(&obj->base);
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
@@ -147,7 +147,7 @@ int udl_gem_vmap(struct udl_gem_object *obj)
 		return 0;
 	}
 
-	ret = udl_gem_get_pages(obj, GFP_KERNEL);
+	ret = udl_gem_get_pages(obj);
 	if (ret)
 		return ret;
 
@@ -205,7 +205,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
 	}
 	gobj = to_udl_bo(obj);
 
-	ret = udl_gem_get_pages(gobj, GFP_KERNEL);
+	ret = udl_gem_get_pages(gobj);
 	if (ret)
 		goto out;
 	ret = drm_gem_create_mmap_offset(obj);