diffstat:

 drivers/gpu/drm/drm_gem.c          | 29 +++++++++++++++++++----------
 drivers/gpu/drm/gma500/gtt.c       |  2 +-
 drivers/gpu/drm/msm/msm_gem.c      |  2 +-
 drivers/gpu/drm/omapdrm/omap_gem.c |  2 +-
 drivers/gpu/drm/udl/udl_gem.c      |  8 ++++----
 include/drm/drmP.h                 |  2 +-
 6 files changed, 28 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index f7d71190aad5..6adee4c2afc0 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -441,18 +441,31 @@ EXPORT_SYMBOL(drm_gem_create_mmap_offset);
  * drm_gem_get_pages - helper to allocate backing pages for a GEM object
  * from shmem
  * @obj: obj in question
- * @gfpmask: gfp mask of requested pages
+ *
+ * This reads the page-array of the shmem-backing storage of the given gem
+ * object. An array of pages is returned. If a page is not allocated or
+ * swapped-out, this will allocate/swap-in the required pages. Note that the
+ * whole object is covered by the page-array and pinned in memory.
+ *
+ * Use drm_gem_put_pages() to release the array and unpin all pages.
+ *
+ * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
+ * If you require other GFP-masks, you have to do those allocations yourself.
+ *
+ * Note that you are not allowed to change gfp-zones during runtime. That is,
+ * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
+ * set during initialization. If you have special zone constraints, set them
+ * after drm_gem_init_object() via mapping_set_gfp_mask(). shmem-core takes care
+ * to keep pages in the required zone during swap-in.
  */
-struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 {
-	struct inode *inode;
 	struct address_space *mapping;
 	struct page *p, **pages;
 	int i, npages;
 
 	/* This is the shared memory object that backs the GEM resource */
-	inode = file_inode(obj->filp);
-	mapping = inode->i_mapping;
+	mapping = file_inode(obj->filp)->i_mapping;
 
 	/* We already BUG_ON() for non-page-aligned sizes in
 	 * drm_gem_object_init(), so we should never hit this unless
@@ -466,10 +479,8 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
 	if (pages == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	gfpmask |= mapping_gfp_mask(mapping);
-
 	for (i = 0; i < npages; i++) {
-		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+		p = shmem_read_mapping_page(mapping, i);
 		if (IS_ERR(p))
 			goto fail;
 		pages[i] = p;
@@ -479,7 +490,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
 		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
 		 * so shmem can relocate pages during swapin if required.
 		 */
-		BUG_ON((gfpmask & __GFP_DMA32) &&
+		BUG_ON((mapping_gfp_mask(mapping) & __GFP_DMA32) &&
 				(page_to_pfn(p) >= 0x00100000UL));
 	}
 
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 592d205a0089..ce015db59dc6 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -206,7 +206,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
 
 	WARN_ON(gt->pages);
 
-	pages = drm_gem_get_pages(&gt->gem, 0);
+	pages = drm_gem_get_pages(&gt->gem);
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index bb8026daebc9..6866879efa3b 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -73,7 +73,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
 		int npages = obj->size >> PAGE_SHIFT;
 
 		if (iommu_present(&platform_bus_type))
-			p = drm_gem_get_pages(obj, 0);
+			p = drm_gem_get_pages(obj);
 		else
 			p = get_pages_vram(obj, npages);
 
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 1331fd538398..7c68c8a18939 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -233,7 +233,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
 
 	WARN_ON(omap_obj->pages);
 
-	pages = drm_gem_get_pages(obj, GFP_KERNEL);
+	pages = drm_gem_get_pages(obj);
 	if (IS_ERR(pages)) {
 		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
 		return PTR_ERR(pages);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index c041cd73f399..8044f5fb7c49 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -107,14 +107,14 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 }
 
-static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
+static int udl_gem_get_pages(struct udl_gem_object *obj)
 {
 	struct page **pages;
 
 	if (obj->pages)
 		return 0;
 
-	pages = drm_gem_get_pages(&obj->base, gfpmask);
+	pages = drm_gem_get_pages(&obj->base);
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
@@ -147,7 +147,7 @@ int udl_gem_vmap(struct udl_gem_object *obj)
 		return 0;
 	}
 
-	ret = udl_gem_get_pages(obj, GFP_KERNEL);
+	ret = udl_gem_get_pages(obj);
 	if (ret)
 		return ret;
 
@@ -205,7 +205,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
 	}
 	gobj = to_udl_bo(obj);
 
-	ret = udl_gem_get_pages(gobj, GFP_KERNEL);
+	ret = udl_gem_get_pages(gobj);
 	if (ret)
 		goto out;
 	ret = drm_gem_create_mmap_offset(obj);
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 8af71a8e2c00..57c07cd89a6a 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1574,7 +1574,7 @@ void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
 int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
 int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
 
-struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+struct page **drm_gem_get_pages(struct drm_gem_object *obj);
 void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 		bool dirty, bool accessed);
 