author    Dave Airlie <airlied@redhat.com>  2014-07-07 21:08:31 -0400
committer Dave Airlie <airlied@redhat.com>  2014-07-07 21:08:31 -0400
commit    f71c5d9dd22f4d6b771cdb591050c84946a3e356 (patch)
tree      a56b10220859bad7b73459eabf5c04409163a261
parent    afa95e7403298110943bc2dc0ab25f8b42b6334c (diff)
parent    d2c87e2d2377966450cfb4271694c77dac615f98 (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~dvdhrm/linux into drm-next
* 'drm-next' of git://people.freedesktop.org/~dvdhrm/linux:
  drm/omap: remove null test before kfree
  drm/bochs: replace ALIGN(PAGE_SIZE) by PAGE_ALIGN
  drm/ttm: recognize ARM arch in ioprot handler
  drm: enable render-nodes by default
  drm/ttm: remove declaration of ttm_tt_cache_flush
  drm/gem: remove misleading gfp parameter to get_pages()
  drm/omap: use __GFP_DMA32 for shmem-backed gem
  drm/i915: use shmem helpers if possible

Conflicts:
	drivers/gpu/drm/drm_stub.c
Diffstat:
 -rw-r--r--  drivers/gpu/drm/bochs/bochs_mm.c     2
 -rw-r--r--  drivers/gpu/drm/drm_gem.c           29
 -rw-r--r--  drivers/gpu/drm/drm_stub.c           6
 -rw-r--r--  drivers/gpu/drm/gma500/gtt.c         2
 -rw-r--r--  drivers/gpu/drm/i915/i915_gem.c      8
 -rw-r--r--  drivers/gpu/drm/msm/msm_gem.c        2
 -rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c  23
 -rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c    2
 -rw-r--r--  drivers/gpu/drm/udl/udl_gem.c        8
 -rw-r--r--  include/drm/drmP.h                   3
 -rw-r--r--  include/drm/ttm/ttm_bo_driver.h     12

11 files changed, 41 insertions(+), 56 deletions(-)
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index b9a695d92792..1728a1b0b813 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -387,7 +387,7 @@ int bochs_gem_create(struct drm_device *dev, u32 size, bool iskernel,
 
 	*obj = NULL;
 
-	size = ALIGN(size, PAGE_SIZE);
+	size = PAGE_ALIGN(size);
 	if (size == 0)
 		return -EINVAL;
 
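
The bochs change is purely cosmetic: PAGE_ALIGN(x) is shorthand for ALIGN(x, PAGE_SIZE), per its definition in include/linux/mm.h:

	/* From include/linux/mm.h -- the two spellings are equivalent. */
	#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
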
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index f7d71190aad5..6adee4c2afc0 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -441,18 +441,31 @@ EXPORT_SYMBOL(drm_gem_create_mmap_offset);
  * drm_gem_get_pages - helper to allocate backing pages for a GEM object
  * from shmem
  * @obj: obj in question
- * @gfpmask: gfp mask of requested pages
+ *
+ * This reads the page-array of the shmem-backing storage of the given gem
+ * object. An array of pages is returned. If a page is not allocated or
+ * swapped-out, this will allocate/swap-in the required pages. Note that the
+ * whole object is covered by the page-array and pinned in memory.
+ *
+ * Use drm_gem_put_pages() to release the array and unpin all pages.
+ *
+ * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
+ * If you require other GFP-masks, you have to do those allocations yourself.
+ *
+ * Note that you are not allowed to change gfp-zones during runtime. That is,
+ * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
+ * set during initialization. If you have special zone constraints, set them
+ * after drm_gem_init_object() via mapping_set_gfp_mask(). shmem-core takes care
+ * to keep pages in the required zone during swap-in.
  */
-struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 {
-	struct inode *inode;
 	struct address_space *mapping;
 	struct page *p, **pages;
 	int i, npages;
 
 	/* This is the shared memory object that backs the GEM resource */
-	inode = file_inode(obj->filp);
-	mapping = inode->i_mapping;
+	mapping = file_inode(obj->filp)->i_mapping;
 
 	/* We already BUG_ON() for non-page-aligned sizes in
 	 * drm_gem_object_init(), so we should never hit this unless
@@ -466,10 +479,8 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
 	if (pages == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	gfpmask |= mapping_gfp_mask(mapping);
-
 	for (i = 0; i < npages; i++) {
-		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+		p = shmem_read_mapping_page(mapping, i);
 		if (IS_ERR(p))
 			goto fail;
 		pages[i] = p;
@@ -479,7 +490,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
 		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
 		 * so shmem can relocate pages during swapin if required.
 		 */
-		BUG_ON((gfpmask & __GFP_DMA32) &&
+		BUG_ON((mapping_gfp_mask(mapping) & __GFP_DMA32) &&
 		       (page_to_pfn(p) >= 0x00100000UL));
 	}
 
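
The caller-side contract after this change: a driver that needs a non-default GFP mask sets it once on the shmem mapping at object-creation time instead of passing it to every get_pages() call. A minimal sketch of the pattern (hypothetical foo_* driver code, modelled on the omapdrm hunk below):

	/* Hypothetical init path: choose the GFP mask once, at object
	 * creation. The gfp-zone must not change after this point. */
	static int foo_gem_init(struct drm_device *dev,
				struct drm_gem_object *obj, size_t size)
	{
		int ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			return ret;
		mapping_set_gfp_mask(file_inode(obj->filp)->i_mapping,
				     GFP_USER | __GFP_DMA32);
		return 0;
	}

	/* Pinning and releasing the backing store is unchanged apart
	 * from the dropped gfpmask argument. */
	static int foo_gem_pin(struct drm_gem_object *obj)
	{
		struct page **pages = drm_gem_get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);
		/* ... use the pinned page array ... */
		drm_gem_put_pages(obj, pages, true, true);
		return 0;
	}
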
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index f4148bfcb26d..4e862b467740 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -37,8 +37,6 @@
 unsigned int drm_debug = 0;	/* 1 to enable debug output */
 EXPORT_SYMBOL(drm_debug);
 
-unsigned int drm_rnodes = 0;	/* 1 to enable experimental render nodes API */
-
 /* 1 to allow user space to request universal planes (experimental) */
 unsigned int drm_universal_planes = 0;
 
@@ -56,13 +54,11 @@ MODULE_AUTHOR(CORE_AUTHOR);
 MODULE_DESCRIPTION(CORE_DESC);
 MODULE_LICENSE("GPL and additional rights");
 MODULE_PARM_DESC(debug, "Enable debug output");
-MODULE_PARM_DESC(rnodes, "Enable experimental render nodes API");
 MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
 MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
 MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
 
 module_param_named(debug, drm_debug, int, 0600);
-module_param_named(rnodes, drm_rnodes, int, 0600);
 module_param_named(universal_planes, drm_universal_planes, int, 0600);
 module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
 module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
@@ -584,7 +580,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
 		goto err_minors;
 	}
 
-	if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
+	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
 		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
 		if (ret)
 			goto err_minors;
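
With drm.rnodes gone, a render node is created for every driver that advertises DRIVER_RENDER; there is no user-visible opt-in any more. A hypothetical driver enables it purely through its feature flags:

	/* Hypothetical driver: DRIVER_RENDER in driver_features is now
	 * sufficient for drm_dev_alloc() to allocate a render minor
	 * (/dev/dri/renderD*); the drm.rnodes parameter no longer exists. */
	static struct drm_driver foo_driver = {
		.driver_features = DRIVER_GEM | DRIVER_RENDER,
		/* ... fops, GEM callbacks, etc. ... */
	};
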
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 592d205a0089..ce015db59dc6 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -206,7 +206,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
 
 	WARN_ON(gt->pages);
 
-	pages = drm_gem_get_pages(&gt->gem, 0);
+	pages = drm_gem_get_pages(&gt->gem);
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f36126383d26..d86b77e905a2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2059,16 +2059,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 			 * our own buffer, now let the real VM do its job and
 			 * go down in flames if truly OOM.
 			 */
-			gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
-			gfp |= __GFP_IO | __GFP_WAIT;
-
 			i915_gem_shrink_all(dev_priv);
-			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+			page = shmem_read_mapping_page(mapping, i);
 			if (IS_ERR(page))
 				goto err_pages;
-
-			gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
-			gfp &= ~(__GFP_IO | __GFP_WAIT);
 		}
 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) {
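
For reference, shmem_read_mapping_page() is just the _gfp variant with the mapping's own mask filled in, so the i915 fallback path keeps its behaviour as long as the mask on the mapping is set up appropriately beforehand. A sketch of the helper as it reads in include/linux/shmem_fs.h of this era:

	static inline struct page *shmem_read_mapping_page(
				struct address_space *mapping, pgoff_t index)
	{
		return shmem_read_mapping_page_gfp(mapping, index,
						   mapping_gfp_mask(mapping));
	}
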
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 690d7e7b6d1e..713722b0ba78 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -73,7 +73,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
 	int npages = obj->size >> PAGE_SHIFT;
 
 	if (iommu_present(&platform_bus_type))
-		p = drm_gem_get_pages(obj, 0);
+		p = drm_gem_get_pages(obj);
 	else
 		p = get_pages_vram(obj, npages);
 
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 95dbce286a41..5c3670017a4a 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -233,11 +233,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
 
 	WARN_ON(omap_obj->pages);
 
-	/* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the
-	 * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
-	 * we actually want CMA memory for it all anyways..
-	 */
-	pages = drm_gem_get_pages(obj, GFP_KERNEL);
+	pages = drm_gem_get_pages(obj);
 	if (IS_ERR(pages)) {
 		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
 		return PTR_ERR(pages);
@@ -1183,9 +1179,7 @@ int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
 			}
 		}
 		spin_unlock(&sync_lock);
-
-		if (waiter)
-			kfree(waiter);
+		kfree(waiter);
 	}
 	return ret;
 }
@@ -1347,6 +1341,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
 	struct omap_drm_private *priv = dev->dev_private;
 	struct omap_gem_object *omap_obj;
 	struct drm_gem_object *obj = NULL;
+	struct address_space *mapping;
 	size_t size;
 	int ret;
 
@@ -1404,14 +1399,16 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
 		omap_obj->height = gsize.tiled.height;
 	}
 
-	ret = 0;
-	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM))
+	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
 		drm_gem_private_object_init(dev, obj, size);
-	else
+	} else {
 		ret = drm_gem_object_init(dev, obj, size);
+		if (ret)
+			goto fail;
 
-	if (ret)
-		goto fail;
+		mapping = file_inode(obj->filp)->i_mapping;
+		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
+	}
 
 	return obj;
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 1df856f78568..30e5d90cb7bc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -500,7 +500,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
 		pgprot_val(tmp) |= _PAGE_GUARDED;
 	}
 #endif
-#if defined(__ia64__)
+#if defined(__ia64__) || defined(__arm__)
 	if (caching_flags & TTM_PL_FLAG_WC)
 		tmp = pgprot_writecombine(tmp);
 	else
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index c041cd73f399..8044f5fb7c49 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -107,14 +107,14 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 }
 
-static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
+static int udl_gem_get_pages(struct udl_gem_object *obj)
 {
 	struct page **pages;
 
 	if (obj->pages)
 		return 0;
 
-	pages = drm_gem_get_pages(&obj->base, gfpmask);
+	pages = drm_gem_get_pages(&obj->base);
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
@@ -147,7 +147,7 @@ int udl_gem_vmap(struct udl_gem_object *obj)
 		return 0;
 	}
 
-	ret = udl_gem_get_pages(obj, GFP_KERNEL);
+	ret = udl_gem_get_pages(obj);
 	if (ret)
 		return ret;
 
@@ -205,7 +205,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
 	}
 	gobj = to_udl_bo(obj);
 
-	ret = udl_gem_get_pages(gobj, GFP_KERNEL);
+	ret = udl_gem_get_pages(gobj);
 	if (ret)
 		goto out;
 	ret = drm_gem_create_mmap_offset(obj);
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 57ecc421b19c..a1344793f4a9 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1395,7 +1395,6 @@ extern void drm_master_put(struct drm_master **master);
 extern void drm_put_dev(struct drm_device *dev);
 extern void drm_unplug_dev(struct drm_device *dev);
 extern unsigned int drm_debug;
-extern unsigned int drm_rnodes;
 extern unsigned int drm_universal_planes;
 
 extern unsigned int drm_vblank_offdelay;
@@ -1585,7 +1584,7 @@ void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
 int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
 int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
 
-struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+struct page **drm_gem_get_pages(struct drm_gem_object *obj);
 void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 		       bool dirty, bool accessed);
 
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index a5183da3ef92..e3f8c99a8a9d 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -653,18 +653,6 @@ extern void ttm_tt_unbind(struct ttm_tt *ttm);
 extern int ttm_tt_swapin(struct ttm_tt *ttm);
 
 /**
- * ttm_tt_cache_flush:
- *
- * @pages: An array of pointers to struct page:s to flush.
- * @num_pages: Number of pages to flush.
- *
- * Flush the data of the indicated pages from the cpu caches.
- * This is used when changing caching attributes of the pages from
- * cache-coherent.
- */
-extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);
-
-/**
  * ttm_tt_set_placement_caching:
  *
  * @ttm A struct ttm_tt the backing pages of which will change caching policy.