Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.c | 170
1 file changed, 89 insertions(+), 81 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index e587d251c590..d8d60c969ac7 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -22,7 +22,45 @@
 #include "msm_drv.h"
 #include "msm_gem.h"
 #include "msm_gpu.h"
+#include "msm_mmu.h"
 
+static dma_addr_t physaddr(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_drm_private *priv = obj->dev->dev_private;
+	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
+			priv->vram.paddr;
+}
+
+/* allocate pages from VRAM carveout, used when no IOMMU: */
+static struct page **get_pages_vram(struct drm_gem_object *obj,
+		int npages)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_drm_private *priv = obj->dev->dev_private;
+	dma_addr_t paddr;
+	struct page **p;
+	int ret, i;
+
+	p = drm_malloc_ab(npages, sizeof(struct page *));
+	if (!p)
+		return ERR_PTR(-ENOMEM);
+
+	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
+			npages, 0, DRM_MM_SEARCH_DEFAULT);
+	if (ret) {
+		drm_free_large(p);
+		return ERR_PTR(ret);
+	}
+
+	paddr = physaddr(obj);
+	for (i = 0; i < npages; i++) {
+		p[i] = phys_to_page(paddr);
+		paddr += PAGE_SIZE;
+	}
+
+	return p;
+}
 
 /* called with dev->struct_mutex held */
 static struct page **get_pages(struct drm_gem_object *obj)
@@ -31,9 +69,14 @@ static struct page **get_pages(struct drm_gem_object *obj)
 
 	if (!msm_obj->pages) {
 		struct drm_device *dev = obj->dev;
-		struct page **p = drm_gem_get_pages(obj, 0);
+		struct page **p;
 		int npages = obj->size >> PAGE_SHIFT;
 
+		if (iommu_present(&platform_bus_type))
+			p = drm_gem_get_pages(obj, 0);
+		else
+			p = get_pages_vram(obj, npages);
+
 		if (IS_ERR(p)) {
 			dev_err(dev->dev, "could not get pages: %ld\n",
 					PTR_ERR(p));
@@ -73,7 +116,11 @@ static void put_pages(struct drm_gem_object *obj)
 		sg_free_table(msm_obj->sgt);
 		kfree(msm_obj->sgt);
 
-		drm_gem_put_pages(obj, msm_obj->pages, true, false);
+		if (iommu_present(&platform_bus_type))
+			drm_gem_put_pages(obj, msm_obj->pages, true, false);
+		else
+			drm_mm_remove_node(msm_obj->vram_node);
+
 		msm_obj->pages = NULL;
 	}
 }
@@ -138,7 +185,6 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct drm_device *dev = obj->dev;
 	struct page **pages;
 	unsigned long pfn;
@@ -163,7 +209,7 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	pgoff = ((unsigned long)vmf->virtual_address -
 			vma->vm_start) >> PAGE_SHIFT;
 
-	pfn = page_to_pfn(msm_obj->pages[pgoff]);
+	pfn = page_to_pfn(pages[pgoff]);
 
 	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
 			pfn, pfn << PAGE_SHIFT);
@@ -219,67 +265,6 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
 	return offset;
 }
 
-/* helpers for dealing w/ iommu: */
-static int map_range(struct iommu_domain *domain, unsigned int iova,
-		struct sg_table *sgt, unsigned int len, int prot)
-{
-	struct scatterlist *sg;
-	unsigned int da = iova;
-	unsigned int i, j;
-	int ret;
-
-	if (!domain || !sgt)
-		return -EINVAL;
-
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-		u32 pa = sg_phys(sg) - sg->offset;
-		size_t bytes = sg->length + sg->offset;
-
-		VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);
-
-		ret = iommu_map(domain, da, pa, bytes, prot);
-		if (ret)
-			goto fail;
-
-		da += bytes;
-	}
-
-	return 0;
-
-fail:
-	da = iova;
-
-	for_each_sg(sgt->sgl, sg, i, j) {
-		size_t bytes = sg->length + sg->offset;
-		iommu_unmap(domain, da, bytes);
-		da += bytes;
-	}
-	return ret;
-}
-
-static void unmap_range(struct iommu_domain *domain, unsigned int iova,
-		struct sg_table *sgt, unsigned int len)
-{
-	struct scatterlist *sg;
-	unsigned int da = iova;
-	int i;
-
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-		size_t bytes = sg->length + sg->offset;
-		size_t unmapped;
-
-		unmapped = iommu_unmap(domain, da, bytes);
-		if (unmapped < bytes)
-			break;
-
-		VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
-
-		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
-
-		da += bytes;
-	}
-}
-
 /* should be called under struct_mutex.. although it can be called
  * from atomic context without struct_mutex to acquire an extra
  * iova ref if you know one is already held.
@@ -295,15 +280,20 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
 
 	if (!msm_obj->domain[id].iova) {
 		struct msm_drm_private *priv = obj->dev->dev_private;
-		uint32_t offset = (uint32_t)mmap_offset(obj);
-		struct page **pages;
-		pages = get_pages(obj);
+		struct msm_mmu *mmu = priv->mmus[id];
+		struct page **pages = get_pages(obj);
+
 		if (IS_ERR(pages))
 			return PTR_ERR(pages);
-		// XXX ideally we would not map buffers writable when not needed...
-		ret = map_range(priv->iommus[id], offset, msm_obj->sgt,
-				obj->size, IOMMU_READ | IOMMU_WRITE);
-		msm_obj->domain[id].iova = offset;
+
+		if (iommu_present(&platform_bus_type)) {
+			uint32_t offset = (uint32_t)mmap_offset(obj);
+			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
+					obj->size, IOMMU_READ | IOMMU_WRITE);
+			msm_obj->domain[id].iova = offset;
+		} else {
+			msm_obj->domain[id].iova = physaddr(obj);
+		}
 	}
 
 	if (!ret)
@@ -514,6 +504,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
 void msm_gem_free_object(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
+	struct msm_drm_private *priv = obj->dev->dev_private;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int id;
 
@@ -525,11 +516,10 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 	list_del(&msm_obj->mm_list);
 
 	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
-		if (msm_obj->domain[id].iova) {
-			struct msm_drm_private *priv = obj->dev->dev_private;
+		struct msm_mmu *mmu = priv->mmus[id];
+		if (mmu && msm_obj->domain[id].iova) {
 			uint32_t offset = (uint32_t)mmap_offset(obj);
-			unmap_range(priv->iommus[id], offset,
-					msm_obj->sgt, obj->size);
+			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
 		}
 	}
 
@@ -591,6 +581,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_object *msm_obj;
+	unsigned sz;
 
 	switch (flags & MSM_BO_CACHE_MASK) {
 	case MSM_BO_UNCACHED:
@@ -603,10 +594,17 @@ static int msm_gem_new_impl(struct drm_device *dev,
 		return -EINVAL;
 	}
 
-	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
+	sz = sizeof(*msm_obj);
+	if (!iommu_present(&platform_bus_type))
+		sz += sizeof(struct drm_mm_node);
+
+	msm_obj = kzalloc(sz, GFP_KERNEL);
 	if (!msm_obj)
 		return -ENOMEM;
 
+	if (!iommu_present(&platform_bus_type))
+		msm_obj->vram_node = (void *)&msm_obj[1];
+
 	msm_obj->flags = flags;
 
 	msm_obj->resv = &msm_obj->_resv;
@@ -623,7 +621,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
 struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 		uint32_t size, uint32_t flags)
 {
-	struct drm_gem_object *obj;
+	struct drm_gem_object *obj = NULL;
 	int ret;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -634,9 +632,13 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 	if (ret)
 		goto fail;
 
-	ret = drm_gem_object_init(dev, obj, size);
-	if (ret)
-		goto fail;
+	if (iommu_present(&platform_bus_type)) {
+		ret = drm_gem_object_init(dev, obj, size);
+		if (ret)
+			goto fail;
+	} else {
+		drm_gem_private_object_init(dev, obj, size);
+	}
 
 	return obj;
 
@@ -654,6 +656,12 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 	struct drm_gem_object *obj;
 	int ret, npages;
 
+	/* if we don't have IOMMU, don't bother pretending we can import: */
+	if (!iommu_present(&platform_bus_type)) {
+		dev_err(dev->dev, "cannot import without IOMMU\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	size = PAGE_ALIGN(size);
 
 	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
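
For context on the fallback path this diff introduces: when no IOMMU is behind the platform bus, buffer pages are carved contiguously out of a fixed, preallocated physical region (managed above with drm_mm and the per-object vram_node) instead of coming from shmem via drm_gem_get_pages(). The standalone C sketch below is not kernel code; names such as carveout, carveout_alloc and carveout_free are made up for illustration. It only models the idea of handing out page-aligned ranges from a fixed pool, which is the role drm_mm_insert_node() plays for the VRAM carveout here.

/* Userspace model of a page-granular "VRAM carveout" allocator.
 * All names are hypothetical; the kernel code above uses drm_mm for
 * the same job.  Build: cc -o carveout carveout.c && ./carveout
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE   4096u
#define TOTAL_PAGES 256u                 /* 1 MiB fake carveout */

struct carveout {
	uint64_t base_paddr;             /* start of the reserved region */
	unsigned char used[TOTAL_PAGES]; /* 1 = page handed out */
};

/* First-fit search for 'npages' contiguous free pages; returns the
 * physical address of the range, or 0 on failure. */
static uint64_t carveout_alloc(struct carveout *c, unsigned npages)
{
	unsigned run = 0;

	for (unsigned i = 0; i < TOTAL_PAGES; i++) {
		run = c->used[i] ? 0 : run + 1;
		if (run == npages) {
			unsigned first = i + 1 - npages;
			memset(&c->used[first], 1, npages);
			return c->base_paddr + (uint64_t)first * PAGE_SIZE;
		}
	}
	return 0;
}

/* Return a previously allocated range to the pool. */
static void carveout_free(struct carveout *c, uint64_t paddr, unsigned npages)
{
	unsigned first = (unsigned)((paddr - c->base_paddr) / PAGE_SIZE);
	memset(&c->used[first], 0, npages);
}

int main(void)
{
	struct carveout c = { .base_paddr = 0x80000000u };
	uint64_t a = carveout_alloc(&c, 16);   /* a 64 KiB buffer */
	uint64_t b = carveout_alloc(&c, 4);    /* a 16 KiB buffer */

	printf("buffer A at paddr 0x%llx\n", (unsigned long long)a);
	printf("buffer B at paddr 0x%llx\n", (unsigned long long)b);

	carveout_free(&c, a, 16);
	carveout_free(&c, b, 4);
	return 0;
}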