Diffstat (limited to 'drivers/gpu/drm/drm_gem.c')
-rw-r--r--  drivers/gpu/drm/drm_gem.c  29
1 files changed, 20 insertions, 9 deletions
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index f7d71190aad5..6adee4c2afc0 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -441,18 +441,31 @@ EXPORT_SYMBOL(drm_gem_create_mmap_offset);
  * drm_gem_get_pages - helper to allocate backing pages for a GEM object
  * from shmem
  * @obj: obj in question
- * @gfpmask: gfp mask of requested pages
+ *
+ * This reads the page-array of the shmem-backing storage of the given gem
+ * object. An array of pages is returned. If a page is not allocated or
+ * swapped-out, this will allocate/swap-in the required pages. Note that the
+ * whole object is covered by the page-array and pinned in memory.
+ *
+ * Use drm_gem_put_pages() to release the array and unpin all pages.
+ *
+ * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
+ * If you require other GFP-masks, you have to do those allocations yourself.
+ *
+ * Note that you are not allowed to change gfp-zones during runtime. That is,
+ * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
+ * set during initialization. If you have special zone constraints, set them
+ * after drm_gem_init_object() via mapping_set_gfp_mask(). shmem-core takes care
+ * to keep pages in the required zone during swap-in.
  */
-struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 {
-        struct inode *inode;
         struct address_space *mapping;
         struct page *p, **pages;
         int i, npages;

         /* This is the shared memory object that backs the GEM resource */
-        inode = file_inode(obj->filp);
-        mapping = inode->i_mapping;
+        mapping = file_inode(obj->filp)->i_mapping;

         /* We already BUG_ON() for non-page-aligned sizes in
          * drm_gem_object_init(), so we should never hit this unless
@@ -466,10 +479,8 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
         if (pages == NULL)
                 return ERR_PTR(-ENOMEM);

-        gfpmask |= mapping_gfp_mask(mapping);
-
         for (i = 0; i < npages; i++) {
-                p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+                p = shmem_read_mapping_page(mapping, i);
                 if (IS_ERR(p))
                         goto fail;
                 pages[i] = p;
@@ -479,7 +490,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
                  * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
                  * so shmem can relocate pages during swapin if required.
                  */
-                BUG_ON((gfpmask & __GFP_DMA32) &&
+                BUG_ON((mapping_gfp_mask(mapping) & __GFP_DMA32) &&
                         (page_to_pfn(p) >= 0x00100000UL));
         }

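
The new kernel-doc describes a pin/unpin pairing: drm_gem_get_pages() pins
the whole object's page-array, and drm_gem_put_pages() releases it. A minimal
sketch of a caller against the new signature follows; the driver type my_obj
and its helpers are hypothetical, not part of this patch:

        #include <drm/drmP.h>

        struct my_obj {
                struct drm_gem_object base;
                struct page **pages;    /* valid only while pinned */
        };

        static int my_obj_pin_pages(struct my_obj *bo)
        {
                /* No gfp argument anymore: the flags come from the
                 * shmem mapping backing the object.
                 */
                struct page **pages = drm_gem_get_pages(&bo->base);

                if (IS_ERR(pages))
                        return PTR_ERR(pages);
                bo->pages = pages;
                return 0;
        }

        static void my_obj_unpin_pages(struct my_obj *bo)
        {
                /* dirty/accessed hints are passed through to the
                 * page cache when the pages are unpinned.
                 */
                drm_gem_put_pages(&bo->base, bo->pages, true, true);
                bo->pages = NULL;
        }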
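
The note about gfp-zones means the zone bits must be chosen once, right after
object initialization, and never changed while pages may be swapped in. Below
is a sketch of how a driver limited to 32-bit DMA addresses might apply that
rule, loosely modeled on i915's DMA32 handling; my_obj and my_obj_init are
hypothetical:

        static int my_obj_init(struct drm_device *dev, struct my_obj *bo,
                               size_t size)
        {
                int ret = drm_gem_object_init(dev, &bo->base, size);

                if (ret)
                        return ret;

                /* Pin the gfp zone once: all later shmem allocations and
                 * swap-ins for this object stay below 4 GiB. GFP_USER sets
                 * no zone bit, so __GFP_DMA32 is the only zone here.
                 */
                mapping_set_gfp_mask(file_inode(bo->base.filp)->i_mapping,
                                     GFP_USER | __GFP_DMA32);
                return 0;
        }

With the mask set this way, the reworked BUG_ON() in drm_gem_get_pages()
checks that shmem kept every page below the 4 GiB boundary (pfn 0x00100000
with 4 KiB pages) during swap-in.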