about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorRob Clark <robdclark@gmail.com>2015-03-03 15:04:25 -0500
committerRob Clark <robdclark@gmail.com>2015-04-01 19:29:33 -0400
commit072f1f9168ed67d6ddc94bb76b1dfc04795062b4 (patch)
treeb817aa6b4980b0fedc19e34007429a7b340cbc72
parent5bf9c0b614542d69fb9a8681a0411715cc3e8ba8 (diff)
drm/msm: add support for "stolen" mem
Add support to use the VRAM carveout (if specified in dtb) for fbdev scanout buffer. This allows drm/msm to take over a bootloader splash-screen, and avoids corruption on screen that results if the kernel uses memory that is still being scanned out for itself. Signed-off-by: Rob Clark <robdclark@gmail.com>
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c44
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c3
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c25
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h5
4 files changed, 66 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b250610e6393..0c38f34066e5 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -182,21 +182,57 @@ static int get_mdp_ver(struct platform_device *pdev)
182 return 4; 182 return 4;
183} 183}
184 184
185#include <linux/of_address.h>
186
185static int msm_init_vram(struct drm_device *dev) 187static int msm_init_vram(struct drm_device *dev)
186{ 188{
187 struct msm_drm_private *priv = dev->dev_private; 189 struct msm_drm_private *priv = dev->dev_private;
190 unsigned long size = 0;
191 int ret = 0;
192
193#ifdef CONFIG_OF
194 /* In the device-tree world, we could have a 'memory-region'
195 * phandle, which gives us a link to our "vram". Allocating
196 * is all nicely abstracted behind the dma api, but we need
197 * to know the entire size to allocate it all in one go. There
198 * are two cases:
199 * 1) device with no IOMMU, in which case we need exclusive
200 * access to a VRAM carveout big enough for all gpu
201 * buffers
202 * 2) device with IOMMU, but where the bootloader puts up
203 * a splash screen. In this case, the VRAM carveout
204 * need only be large enough for fbdev fb. But we need
205 * exclusive access to the buffer to avoid the kernel
206 * using those pages for other purposes (which appears
207 * as corruption on screen before we have a chance to
208 * load and do initial modeset)
209 */
210 struct device_node *node;
211
212 node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
213 if (node) {
214 struct resource r;
215 ret = of_address_to_resource(node, 0, &r);
216 if (ret)
217 return ret;
218 size = r.end - r.start;
219 DRM_INFO("using VRAM carveout: %lx@%08x\n", size, r.start);
220 } else
221#endif
188 222
189 /* if we have no IOMMU, then we need to use carveout allocator. 223 /* if we have no IOMMU, then we need to use carveout allocator.
190 * Grab the entire CMA chunk carved out in early startup in 224 * Grab the entire CMA chunk carved out in early startup in
191 * mach-msm: 225 * mach-msm:
192 */ 226 */
193 if (!iommu_present(&platform_bus_type)) { 227 if (!iommu_present(&platform_bus_type)) {
228 DRM_INFO("using %s VRAM carveout\n", vram);
229 size = memparse(vram, NULL);
230 }
231
232 if (size) {
194 DEFINE_DMA_ATTRS(attrs); 233 DEFINE_DMA_ATTRS(attrs);
195 unsigned long size;
196 void *p; 234 void *p;
197 235
198 DBG("using %s VRAM carveout", vram);
199 size = memparse(vram, NULL);
200 priv->vram.size = size; 236 priv->vram.size = size;
201 237
202 drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1); 238 drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
@@ -220,7 +256,7 @@ static int msm_init_vram(struct drm_device *dev)
220 (uint32_t)(priv->vram.paddr + size)); 256 (uint32_t)(priv->vram.paddr + size));
221 } 257 }
222 258
223 return 0; 259 return ret;
224} 260}
225 261
226static int msm_load(struct drm_device *dev, unsigned long flags) 262static int msm_load(struct drm_device *dev, unsigned long flags)
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index df60f65728ff..95f6532df02d 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -110,7 +110,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
110 size = mode_cmd.pitches[0] * mode_cmd.height; 110 size = mode_cmd.pitches[0] * mode_cmd.height;
111 DBG("allocating %d bytes for fb %d", size, dev->primary->index); 111 DBG("allocating %d bytes for fb %d", size, dev->primary->index);
112 mutex_lock(&dev->struct_mutex); 112 mutex_lock(&dev->struct_mutex);
113 fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC); 113 fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT |
114 MSM_BO_WC | MSM_BO_STOLEN);
114 mutex_unlock(&dev->struct_mutex); 115 mutex_unlock(&dev->struct_mutex);
115 if (IS_ERR(fbdev->bo)) { 116 if (IS_ERR(fbdev->bo)) {
116 ret = PTR_ERR(fbdev->bo); 117 ret = PTR_ERR(fbdev->bo);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 49dea4fb55ac..479d8af72bcb 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -32,6 +32,12 @@ static dma_addr_t physaddr(struct drm_gem_object *obj)
32 priv->vram.paddr; 32 priv->vram.paddr;
33} 33}
34 34
35static bool use_pages(struct drm_gem_object *obj)
36{
37 struct msm_gem_object *msm_obj = to_msm_bo(obj);
38 return !msm_obj->vram_node;
39}
40
35/* allocate pages from VRAM carveout, used when no IOMMU: */ 41/* allocate pages from VRAM carveout, used when no IOMMU: */
36static struct page **get_pages_vram(struct drm_gem_object *obj, 42static struct page **get_pages_vram(struct drm_gem_object *obj,
37 int npages) 43 int npages)
@@ -72,7 +78,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
72 struct page **p; 78 struct page **p;
73 int npages = obj->size >> PAGE_SHIFT; 79 int npages = obj->size >> PAGE_SHIFT;
74 80
75 if (iommu_present(&platform_bus_type)) 81 if (use_pages(obj))
76 p = drm_gem_get_pages(obj); 82 p = drm_gem_get_pages(obj);
77 else 83 else
78 p = get_pages_vram(obj, npages); 84 p = get_pages_vram(obj, npages);
@@ -116,7 +122,7 @@ static void put_pages(struct drm_gem_object *obj)
116 sg_free_table(msm_obj->sgt); 122 sg_free_table(msm_obj->sgt);
117 kfree(msm_obj->sgt); 123 kfree(msm_obj->sgt);
118 124
119 if (iommu_present(&platform_bus_type)) 125 if (use_pages(obj))
120 drm_gem_put_pages(obj, msm_obj->pages, true, false); 126 drm_gem_put_pages(obj, msm_obj->pages, true, false);
121 else { 127 else {
122 drm_mm_remove_node(msm_obj->vram_node); 128 drm_mm_remove_node(msm_obj->vram_node);
@@ -580,6 +586,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
580 struct msm_drm_private *priv = dev->dev_private; 586 struct msm_drm_private *priv = dev->dev_private;
581 struct msm_gem_object *msm_obj; 587 struct msm_gem_object *msm_obj;
582 unsigned sz; 588 unsigned sz;
589 bool use_vram = false;
583 590
584 switch (flags & MSM_BO_CACHE_MASK) { 591 switch (flags & MSM_BO_CACHE_MASK) {
585 case MSM_BO_UNCACHED: 592 case MSM_BO_UNCACHED:
@@ -592,15 +599,23 @@ static int msm_gem_new_impl(struct drm_device *dev,
592 return -EINVAL; 599 return -EINVAL;
593 } 600 }
594 601
595 sz = sizeof(*msm_obj);
596 if (!iommu_present(&platform_bus_type)) 602 if (!iommu_present(&platform_bus_type))
603 use_vram = true;
604 else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
605 use_vram = true;
606
607 if (WARN_ON(use_vram && !priv->vram.size))
608 return -EINVAL;
609
610 sz = sizeof(*msm_obj);
611 if (use_vram)
597 sz += sizeof(struct drm_mm_node); 612 sz += sizeof(struct drm_mm_node);
598 613
599 msm_obj = kzalloc(sz, GFP_KERNEL); 614 msm_obj = kzalloc(sz, GFP_KERNEL);
600 if (!msm_obj) 615 if (!msm_obj)
601 return -ENOMEM; 616 return -ENOMEM;
602 617
603 if (!iommu_present(&platform_bus_type)) 618 if (use_vram)
604 msm_obj->vram_node = (void *)&msm_obj[1]; 619 msm_obj->vram_node = (void *)&msm_obj[1];
605 620
606 msm_obj->flags = flags; 621 msm_obj->flags = flags;
@@ -630,7 +645,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
630 if (ret) 645 if (ret)
631 goto fail; 646 goto fail;
632 647
633 if (iommu_present(&platform_bus_type)) { 648 if (use_pages(obj)) {
634 ret = drm_gem_object_init(dev, obj, size); 649 ret = drm_gem_object_init(dev, obj, size);
635 if (ret) 650 if (ret)
636 goto fail; 651 goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 8fbbd0594c46..85d481e29276 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -21,6 +21,9 @@
21#include <linux/reservation.h> 21#include <linux/reservation.h>
22#include "msm_drv.h" 22#include "msm_drv.h"
23 23
24/* Additional internal-use only BO flags: */
25#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
26
24struct msm_gem_object { 27struct msm_gem_object {
25 struct drm_gem_object base; 28 struct drm_gem_object base;
26 29
@@ -59,7 +62,7 @@ struct msm_gem_object {
59 struct reservation_object _resv; 62 struct reservation_object _resv;
60 63
61 /* For physically contiguous buffers. Used when we don't have 64 /* For physically contiguous buffers. Used when we don't have
62 * an IOMMU. 65 * an IOMMU. Also used for stolen/splashscreen buffer.
63 */ 66 */
64 struct drm_mm_node *vram_node; 67 struct drm_mm_node *vram_node;
65}; 68};