Diffstat (limited to 'drivers/gpu/drm/omapdrm/omap_gem.c')
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 59
1 file changed, 20 insertions(+), 39 deletions(-)
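In short: the patch folds the lazy shmem page-allocation check from __omap_gem_get_pages() into omap_gem_attach_pages() itself, deletes the wrapper, and lets callers read the cached omap_obj->pages array directly. A condensed before/after of the caller pattern, distilled from the hunks below (illustrative, not a literal excerpt):

    /* Before: the wrapper returned the pages array via a parameter. */
    struct page **pages;
    ret = __omap_gem_get_pages(obj, &pages);
    if (ret)
            goto fail;
    ret = tiler_pin(omap_obj->block, pages, npages, roll, true);

    /* After: attach is an idempotent no-op when pages already exist (or
     * the object is not shmem-backed), so callers invoke it directly and
     * then use omap_obj->pages.
     */
    ret = omap_gem_attach_pages(obj);
    if (ret)
            goto fail;
    ret = tiler_pin(omap_obj->block, omap_obj->pages, npages, roll, true);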
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 7a4ee4edab5b..a3efac4abd4b 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -222,7 +222,7 @@ static void omap_gem_evict(struct drm_gem_object *obj)
  * Page Management
  */
 
-/** ensure backing pages are allocated */
+/* Ensure backing pages are allocated. */
 static int omap_gem_attach_pages(struct drm_gem_object *obj)
 {
         struct drm_device *dev = obj->dev;
@@ -232,7 +232,12 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
         int i, ret;
         dma_addr_t *addrs;
 
-        WARN_ON(omap_obj->pages);
+        /*
+         * If not using shmem (in which case backing pages don't need to be
+         * allocated) or if pages are already allocated we're done.
+         */
+        if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)
+                return 0;
 
         pages = drm_gem_get_pages(obj);
         if (IS_ERR(pages)) {
@@ -288,29 +293,6 @@ free_pages:
         return ret;
 }
 
-/* acquire pages when needed (for example, for DMA where physically
- * contiguous buffer is not required
- */
-static int __omap_gem_get_pages(struct drm_gem_object *obj,
-                                struct page ***pages)
-{
-        struct omap_gem_object *omap_obj = to_omap_bo(obj);
-        int ret = 0;
-
-        if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
-                ret = omap_gem_attach_pages(obj);
-                if (ret) {
-                        dev_err(obj->dev->dev, "could not attach pages\n");
-                        return ret;
-                }
-        }
-
-        /* TODO: even phys-contig.. we should have a list of pages? */
-        *pages = omap_obj->pages;
-
-        return 0;
-}
-
 /** release backing pages */
 static void omap_gem_detach_pages(struct drm_gem_object *obj)
 {
@@ -522,7 +504,6 @@ vm_fault_t omap_gem_fault(struct vm_fault *vmf)
         struct drm_gem_object *obj = vma->vm_private_data;
         struct omap_gem_object *omap_obj = to_omap_bo(obj);
         struct drm_device *dev = obj->dev;
-        struct page **pages;
         int err;
         vm_fault_t ret;
 
@@ -532,7 +513,7 @@ vm_fault_t omap_gem_fault(struct vm_fault *vmf)
         mutex_lock(&dev->struct_mutex);
 
         /* if a shmem backed object, make sure we have pages attached now */
-        err = __omap_gem_get_pages(obj, &pages);
+        err = omap_gem_attach_pages(obj);
         if (err) {
                 ret = vmf_error(err);
                 goto fail;
@@ -689,12 +670,12 @@ int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
 
         /* if we aren't mapped yet, we don't need to do anything */
         if (omap_obj->block) {
-                struct page **pages;
-
-                ret = __omap_gem_get_pages(obj, &pages);
+                ret = omap_gem_attach_pages(obj);
                 if (ret)
                         goto fail;
-                ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
+
+                ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
+                                roll, true);
                 if (ret)
                         dev_err(obj->dev->dev, "could not repin: %d\n", ret);
         }
@@ -805,14 +786,13 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
 
         if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) {
                 if (omap_obj->dma_addr_cnt == 0) {
-                        struct page **pages;
                         u32 npages = obj->size >> PAGE_SHIFT;
                         enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
                         struct tiler_block *block;
 
                         BUG_ON(omap_obj->block);
 
-                        ret = __omap_gem_get_pages(obj, &pages);
+                        ret = omap_gem_attach_pages(obj);
                         if (ret)
                                 goto fail;
 
@@ -832,7 +812,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
                 }
 
                 /* TODO: enable async refill.. */
-                ret = tiler_pin(block, pages, npages,
+                ret = tiler_pin(block, omap_obj->pages, npages,
                                 omap_obj->roll, true);
                 if (ret) {
                         tiler_release(block);
@@ -941,16 +921,18 @@ int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
 int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
                 bool remap)
 {
+        struct omap_gem_object *omap_obj = to_omap_bo(obj);
         int ret;
+
         if (!remap) {
-                struct omap_gem_object *omap_obj = to_omap_bo(obj);
                 if (!omap_obj->pages)
                         return -ENOMEM;
                 *pages = omap_obj->pages;
                 return 0;
         }
         mutex_lock(&obj->dev->struct_mutex);
-        ret = __omap_gem_get_pages(obj, pages);
+        ret = omap_gem_attach_pages(obj);
+        *pages = omap_obj->pages;
         mutex_unlock(&obj->dev->struct_mutex);
         return ret;
 }
@@ -975,13 +957,12 @@ void *omap_gem_vaddr(struct drm_gem_object *obj)
         struct omap_gem_object *omap_obj = to_omap_bo(obj);
         WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
         if (!omap_obj->vaddr) {
-                struct page **pages;
                 int ret;
 
-                ret = __omap_gem_get_pages(obj, &pages);
+                ret = omap_gem_attach_pages(obj);
                 if (ret)
                         return ERR_PTR(ret);
-                omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
+                omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
                                 VM_MAP, pgprot_writecombine(PAGE_KERNEL));
         }
         return omap_obj->vaddr;
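As a standalone illustration of the pattern the patch converges on (an idempotent attach that every caller can invoke unconditionally), here is a minimal user-space C sketch. This is not kernel code: struct bo, BO_MEM_SHMEM, and bo_attach_pages() are invented stand-ins for omap_gem_object, OMAP_BO_MEM_SHMEM, and omap_gem_attach_pages().

    #include <stdio.h>
    #include <stdlib.h>

    #define BO_MEM_SHMEM 0x1        /* stand-in for the OMAP_BO_MEM_SHMEM flag */

    struct bo {
            unsigned int flags;
            void **pages;           /* backing pages, allocated lazily */
            size_t npages;
    };

    /*
     * Idempotent attach: a no-op unless the object is shmem-backed and its
     * pages have not been allocated yet, mirroring the guard the patch adds
     * at the top of omap_gem_attach_pages().
     */
    static int bo_attach_pages(struct bo *bo)
    {
            if (!(bo->flags & BO_MEM_SHMEM) || bo->pages)
                    return 0;

            bo->pages = calloc(bo->npages, sizeof(*bo->pages));
            return bo->pages ? 0 : -1;
    }

    int main(void)
    {
            struct bo bo = { .flags = BO_MEM_SHMEM, .npages = 4 };

            /* New-style caller: attach unconditionally, then read bo.pages. */
            if (bo_attach_pages(&bo))
                    return 1;
            printf("pages array at %p\n", (void *)bo.pages);

            /* A second call is a cheap no-op thanks to the guard. */
            if (bo_attach_pages(&bo))
                    return 1;

            free(bo.pages);
            return 0;
    }

Because the guard makes attach safe to call at any time, each call site in the driver can drop its local struct page **pages variable and the duplicated error handling the old wrapper carried, which is where the net 20-insertion/39-deletion shrink comes from.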