author	Rob Clark <robdclark@gmail.com>	2017-06-13 13:54:13 -0400
committer	Rob Clark <robdclark@gmail.com>	2017-06-16 11:16:06 -0400
commit	4b85f7f5cf776b0fcd4a2e38cb9c69849aae0fc5 (patch)
tree	f836e8192980d9ba943174436ab616f87af20d1f
parent	f4839bd5126310635314610a85468e87b40ce4c8 (diff)
drm/msm: support for an arbitrary number of address spaces
This means we now do a list traversal where we once had a simple index
into a table, but the list will normally have only one or two entries.
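
To make the trade-off concrete, here is a minimal self-contained C
sketch of the pattern (a userspace stand-in, not driver code; every
name in it is illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct aspace { const char *name; };

    struct vma {
        struct aspace *aspace;    /* which address space this mapping belongs to */
        unsigned long long iova;
        struct vma *next;         /* stand-in for the kernel's list_head */
    };

    struct object {
        struct vma *vmas;         /* normally one or two entries */
    };

    /* New scheme: walk the short per-object list until the matching
     * address space is found; the old scheme indexed a fixed array,
     * obj->domain[aspace->id], bounded by a compile-time NUM_DOMAINS. */
    static struct vma *lookup_vma(struct object *obj, struct aspace *as)
    {
        struct vma *v;

        for (v = obj->vmas; v; v = v->next)
            if (v->aspace == as)
                return v;
        return NULL;
    }

    static struct vma *add_vma(struct object *obj, struct aspace *as)
    {
        struct vma *v = calloc(1, sizeof(*v));

        if (!v)
            return NULL;
        v->aspace = as;
        v->next = obj->vmas;
        obj->vmas = v;
        return v;
    }

    int main(void)
    {
        struct aspace gpu = { "gpu" }, mdp = { "mdp" };
        struct object obj = { NULL };
        struct vma *v;

        if ((v = add_vma(&obj, &gpu)))
            v->iova = 0x100000ull;
        if ((v = add_vma(&obj, &mdp)))
            v->iova = 0x200000ull;

        if ((v = lookup_vma(&obj, &gpu)))
            printf("%s iova: %llx\n", gpu.name, v->iova);

        return 0;    /* cleanup omitted for brevity */
    }

The lookup becomes O(n) in the number of address spaces that map the
object, but with one or two entries that is cheaper in practice than
sizing a static table for the worst case.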
Signed-off-by: Rob Clark <robdclark@gmail.com>
-rw-r--r--	drivers/gpu/drm/msm/msm_gem.c	138
-rw-r--r--	drivers/gpu/drm/msm/msm_gem.h	4
2 files changed, 99 insertions(+), 43 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 2e5c987f7f2c..9951c78ee215 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -283,21 +283,59 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
 	return offset;
 }
 
+static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_gem_vma *vma;
+
+	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+	if (!vma)
+		return ERR_PTR(-ENOMEM);
+
+	vma->aspace = aspace;
+
+	list_add_tail(&vma->list, &msm_obj->vmas);
+
+	return vma;
+}
+
+static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_gem_vma *vma;
+
+	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
+	list_for_each_entry(vma, &msm_obj->vmas, list) {
+		if (vma->aspace == aspace)
+			return vma;
+	}
+
+	return NULL;
+}
+
+static void del_vma(struct msm_gem_vma *vma)
+{
+	if (!vma)
+		return;
+
+	list_del(&vma->list);
+	kfree(vma);
+}
+
 static void
 put_iova(struct drm_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
-	struct msm_drm_private *priv = obj->dev->dev_private;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	int id;
+	struct msm_gem_vma *vma, *tmp;
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
 
-	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
-		if (!priv->aspace[id])
-			continue;
-		msm_gem_unmap_vma(priv->aspace[id],
-				&msm_obj->domain[id], msm_obj->sgt);
+	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
+		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
+		del_vma(vma);
 	}
 }
 
@@ -312,24 +350,37 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	int id = aspace ? aspace->id : 0;
+	struct msm_gem_vma *vma;
 	int ret = 0;
 
 	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
 
-	if (!msm_obj->domain[id].iova) {
-		struct msm_drm_private *priv = obj->dev->dev_private;
-		struct page **pages = get_pages(obj);
+	vma = lookup_vma(obj, aspace);
 
-		if (IS_ERR(pages))
-			return PTR_ERR(pages);
+	if (!vma) {
+		struct page **pages;
+
+		vma = add_vma(obj, aspace);
+		if (IS_ERR(vma))
+			return PTR_ERR(vma);
+
+		pages = get_pages(obj);
+		if (IS_ERR(pages)) {
+			ret = PTR_ERR(pages);
+			goto fail;
+		}
 
-		ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
-				msm_obj->sgt, obj->size >> PAGE_SHIFT);
+		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
+				obj->size >> PAGE_SHIFT);
+		if (ret)
+			goto fail;
 	}
 
-	if (!ret)
-		*iova = msm_obj->domain[id].iova;
+	*iova = vma->iova;
+	return 0;
+
+fail:
+	del_vma(vma);
 
 	return ret;
 }
@@ -338,22 +389,12 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj,
 int msm_gem_get_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	int id = aspace ? aspace->id : 0;
 	int ret;
 
-	/* this is safe right now because we don't unmap until the
-	 * bo is deleted:
-	 */
-	if (msm_obj->domain[id].iova) {
-		might_lock(&obj->dev->struct_mutex);
-		*iova = msm_obj->domain[id].iova;
-		return 0;
-	}
-
 	mutex_lock(&obj->dev->struct_mutex);
 	ret = msm_gem_get_iova_locked(obj, aspace, iova);
 	mutex_unlock(&obj->dev->struct_mutex);
+
 	return ret;
 }
 
@@ -363,10 +404,14 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
 uint64_t msm_gem_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	int id = aspace ? aspace->id : 0;
-	WARN_ON(!msm_obj->domain[id].iova);
-	return msm_obj->domain[id].iova;
+	struct msm_gem_vma *vma;
+
+	mutex_lock(&obj->dev->struct_mutex);
+	vma = lookup_vma(obj, aspace);
+	mutex_unlock(&obj->dev->struct_mutex);
+	WARN_ON(!vma);
+
+	return vma ? vma->iova : 0;
 }
 
 void msm_gem_put_iova(struct drm_gem_object *obj,
@@ -624,11 +669,10 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct reservation_object *robj = msm_obj->resv;
 	struct reservation_object_list *fobj;
-	struct msm_drm_private *priv = obj->dev->dev_private;
 	struct dma_fence *fence;
+	struct msm_gem_vma *vma;
 	uint64_t off = drm_vma_node_start(&obj->vma_node);
 	const char *madv;
-	unsigned id;
 
 	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
 
@@ -650,8 +694,9 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 			obj->name, kref_read(&obj->refcount),
 			off, msm_obj->vaddr);
 
-	for (id = 0; id < priv->num_aspaces; id++)
-		seq_printf(m, " %08llx", msm_obj->domain[id].iova);
+	/* FIXME: we need to print the address space here too */
+	list_for_each_entry(vma, &msm_obj->vmas, list)
+		seq_printf(m, " %08llx", vma->iova);
 
 	seq_printf(m, " %zu%s\n", obj->size, madv);
 
@@ -790,6 +835,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
 	}
 
 	INIT_LIST_HEAD(&msm_obj->submit_entry);
+	INIT_LIST_HEAD(&msm_obj->vmas);
+
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 
 	*obj = &msm_obj->base;
@@ -828,19 +875,26 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 		goto fail;
 
 	if (use_vram) {
-		struct msm_gem_object *msm_obj = to_msm_bo(obj);
+		struct msm_gem_vma *vma;
 		struct page **pages;
 
-		msm_obj->vram_node = &msm_obj->domain[0].node;
+		vma = add_vma(obj, NULL);
+		if (IS_ERR(vma)) {
+			ret = PTR_ERR(vma);
+			goto fail;
+		}
+
+		to_msm_bo(obj)->vram_node = &vma->node;
+
 		drm_gem_private_object_init(dev, obj, size);
 
-		msm_obj->pages = get_pages(obj);
 		pages = get_pages(obj);
 		if (IS_ERR(pages)) {
 			ret = PTR_ERR(pages);
 			goto fail;
 		}
-		msm_obj->domain[0].iova = physaddr(obj);
+
+		vma->iova = physaddr(obj);
 	} else {
 		ret = drm_gem_object_init(dev, obj, size);
 		if (ret)
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 4b4b352b5718..ff468da70fb6 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -39,6 +39,8 @@ struct msm_gem_address_space {
 struct msm_gem_vma {
 	struct drm_mm_node node;
 	uint64_t iova;
+	struct msm_gem_address_space *aspace;
+	struct list_head list;	/* node in msm_gem_object::vmas */
 };
 
 struct msm_gem_object {
@@ -78,7 +80,7 @@ struct msm_gem_object {
 	struct sg_table *sgt;
 	void *vaddr;
 
-	struct msm_gem_vma domain[NUM_DOMAINS];
+	struct list_head vmas;	/* list of msm_gem_vma */
 
 	/* normally (resv == &_resv) except for imported bo's */
 	struct reservation_object *resv;
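
For reference, a rough sketch of how a caller might pin a buffer into
one particular address space after this change (the gpu->aspace pointer
is a placeholder for whichever msm_gem_address_space the caller owns,
not something this patch defines):

    uint64_t iova;
    int ret;

    /* msm_gem_get_iova() takes struct_mutex itself; on first use it
     * creates and maps the per-aspace VMA, and afterwards it returns
     * the cached iova for that address space. */
    ret = msm_gem_get_iova(obj, gpu->aspace, &iova);
    if (ret)
        return ret;

    /* ... hand iova to the hardware ... */

    msm_gem_put_iova(obj, gpu->aspace);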