Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 160
1 file changed, 121 insertions(+), 39 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 2bae46c66a30..e587d251c590 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -17,6 +17,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
 
 #include "msm_drv.h"
 #include "msm_gem.h"
@@ -77,6 +78,21 @@ static void put_pages(struct drm_gem_object *obj)
 	}
 }
 
+struct page **msm_gem_get_pages(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct page **p;
+	mutex_lock(&dev->struct_mutex);
+	p = get_pages(obj);
+	mutex_unlock(&dev->struct_mutex);
+	return p;
+}
+
+void msm_gem_put_pages(struct drm_gem_object *obj)
+{
+	/* when we start tracking the pin count, then do something here */
+}
+
 int msm_gem_mmap_obj(struct drm_gem_object *obj,
 		struct vm_area_struct *vma)
 {
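The new msm_gem_get_pages()/msm_gem_put_pages() pair is the pin/unpin hook that dma-buf export wants. As a minimal sketch of a caller, assuming the generic drm_prime_pages_to_sg() helper; example_get_sg() is illustrative and not part of this patch:

/* Hypothetical caller (not in this patch): pin the backing pages and
 * turn them into an sg_table for dma-buf export, via the generic
 * drm_prime_pages_to_sg() helper.
 */
static struct sg_table *example_get_sg(struct drm_gem_object *obj)
{
	struct page **pages = msm_gem_get_pages(obj);

	if (IS_ERR(pages))
		return ERR_CAST(pages);

	return drm_prime_pages_to_sg(pages, obj->size >> PAGE_SHIFT);
}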
@@ -162,6 +178,11 @@ out:
 	case 0:
 	case -ERESTARTSYS:
 	case -EINTR:
+	case -EBUSY:
+		/*
+		 * EBUSY is ok: this just means that another thread
+		 * already did the job.
+		 */
 		return VM_FAULT_NOPAGE;
 	case -ENOMEM:
 		return VM_FAULT_OOM;
@@ -293,7 +314,17 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
 
 int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
 {
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret;
+
+	/* this is safe right now because we don't unmap until the
+	 * bo is deleted:
+	 */
+	if (msm_obj->domain[id].iova) {
+		*iova = msm_obj->domain[id].iova;
+		return 0;
+	}
+
 	mutex_lock(&obj->dev->struct_mutex);
 	ret = msm_gem_get_iova_locked(obj, id, iova);
 	mutex_unlock(&obj->dev->struct_mutex);
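The cached iova means repeat lookups skip dev->struct_mutex entirely. An illustrative hot-path caller (not from this patch; the use of msm_gpu's id field as the per-address-space index is an assumption based on how the driver uses it elsewhere):

/* Illustrative only: second and later calls for the same bo hit the
 * cached domain[id].iova and never take dev->struct_mutex.
 */
static int example_emit_reloc(struct msm_gpu *gpu, struct drm_gem_object *obj)
{
	uint32_t iova;
	int ret = msm_gem_get_iova(obj, gpu->id, &iova);

	if (ret)
		return ret;

	/* ... emit iova into the command stream ... */
	return 0;
}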
@@ -363,8 +394,11 @@ void *msm_gem_vaddr(struct drm_gem_object *obj)
 	return ret;
 }
 
-int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
-		struct work_struct *work)
+/* setup callback for when bo is no longer busy..
+ * TODO probably want to differentiate read vs write..
+ */
+int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
+		struct msm_fence_cb *cb)
 {
 	struct drm_device *dev = obj->dev;
 	struct msm_drm_private *priv = dev->dev_private;
@@ -372,12 +406,13 @@ int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
 	int ret = 0;
 
 	mutex_lock(&dev->struct_mutex);
-	if (!list_empty(&work->entry)) {
+	if (!list_empty(&cb->work.entry)) {
 		ret = -EINVAL;
 	} else if (is_active(msm_obj)) {
-		list_add_tail(&work->entry, &msm_obj->inactive_work);
+		cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
+		list_add_tail(&cb->work.entry, &priv->fence_cbs);
 	} else {
-		queue_work(priv->wq, &cb->work);
+		queue_work(priv->wq, &cb->work);
 	}
 	mutex_unlock(&dev->struct_mutex);
 
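With the per-bo work list replaced by per-device fence callbacks, callers queue a msm_fence_cb instead of a bare work_struct. A sketch under the assumption that the companion msm_drv.h change in this series provides an INIT_FENCE_CB() initializer whose worker invokes cb->func:

/* Sketch, assuming INIT_FENCE_CB() from the companion msm_drv.h
 * change: run a callback on priv->wq once the bo's most recent
 * read/write fence has passed.
 */
static void example_bo_idle(struct msm_fence_cb *cb)
{
	/* bo is no longer busy; safe to e.g. complete a page flip */
}

static struct msm_fence_cb example_cb;

static int example_watch_bo(struct drm_gem_object *obj)
{
	INIT_FENCE_CB(&example_cb, example_bo_idle);
	return msm_gem_queue_inactive_cb(obj, &example_cb);
}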
@@ -410,16 +445,6 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
 	msm_obj->write_fence = 0;
 	list_del_init(&msm_obj->mm_list);
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-
-	while (!list_empty(&msm_obj->inactive_work)) {
-		struct work_struct *work;
-
-		work = list_first_entry(&msm_obj->inactive_work,
-				struct work_struct, entry);
-
-		list_del_init(&work->entry);
-		queue_work(priv->wq, work);
-	}
 }
 
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
@@ -510,10 +535,21 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 
 	drm_gem_free_mmap_offset(obj);
 
-	if (msm_obj->vaddr)
-		vunmap(msm_obj->vaddr);
+	if (obj->import_attach) {
+		if (msm_obj->vaddr)
+			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
 
-	put_pages(obj);
+		/* Don't drop the pages for imported dmabuf, as they are not
+		 * ours, just free the array we allocated:
+		 */
+		if (msm_obj->pages)
+			drm_free_large(msm_obj->pages);
+
+	} else {
+		if (msm_obj->vaddr)
+			vunmap(msm_obj->vaddr);
+		put_pages(obj);
+	}
 
 	if (msm_obj->resv == &msm_obj->_resv)
 		reservation_object_fini(msm_obj->resv);
@@ -549,17 +585,12 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 	return ret;
 }
 
-struct drm_gem_object *msm_gem_new(struct drm_device *dev,
-		uint32_t size, uint32_t flags)
+static int msm_gem_new_impl(struct drm_device *dev,
+		uint32_t size, uint32_t flags,
+		struct drm_gem_object **obj)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_object *msm_obj;
-	struct drm_gem_object *obj = NULL;
-	int ret;
-
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-	size = PAGE_ALIGN(size);
 
 	switch (flags & MSM_BO_CACHE_MASK) {
 	case MSM_BO_UNCACHED:
@@ -569,21 +600,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 	default:
 		dev_err(dev->dev, "invalid cache flag: %x\n",
 				(flags & MSM_BO_CACHE_MASK));
-		ret = -EINVAL;
-		goto fail;
+		return -EINVAL;
 	}
 
 	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
-	if (!msm_obj) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	obj = &msm_obj->base;
-
-	ret = drm_gem_object_init(dev, obj, size);
-	if (ret)
-		goto fail;
+	if (!msm_obj)
+		return -ENOMEM;
 
 	msm_obj->flags = flags;
 
@@ -591,9 +613,69 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 	reservation_object_init(msm_obj->resv);
 
 	INIT_LIST_HEAD(&msm_obj->submit_entry);
-	INIT_LIST_HEAD(&msm_obj->inactive_work);
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 
+	*obj = &msm_obj->base;
+
+	return 0;
+}
+
+struct drm_gem_object *msm_gem_new(struct drm_device *dev,
+		uint32_t size, uint32_t flags)
+{
+	struct drm_gem_object *obj = NULL;
+	int ret;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	size = PAGE_ALIGN(size);
+
+	ret = msm_gem_new_impl(dev, size, flags, &obj);
+	if (ret)
+		goto fail;
+
+	ret = drm_gem_object_init(dev, obj, size);
+	if (ret)
+		goto fail;
+
+	return obj;
+
+fail:
+	if (obj)
+		drm_gem_object_unreference_unlocked(obj);
+
+	return ERR_PTR(ret);
+}
+
+struct drm_gem_object *msm_gem_import(struct drm_device *dev,
+		uint32_t size, struct sg_table *sgt)
+{
+	struct msm_gem_object *msm_obj;
+	struct drm_gem_object *obj = NULL;
+	int ret, npages;
+
+	size = PAGE_ALIGN(size);
+
+	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
+	if (ret)
+		goto fail;
+
+	drm_gem_private_object_init(dev, obj, size);
+
+	npages = size / PAGE_SIZE;
+
+	msm_obj = to_msm_bo(obj);
+	msm_obj->sgt = sgt;
+	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+	if (!msm_obj->pages) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
+	if (ret)
+		goto fail;
+
 	return obj;
 
 fail:
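
The new msm_gem_import() slots into the generic PRIME import path. The likely wiring, as in other GEM drivers of this vintage, is shown below as an assumption, since msm_gem_prime.c is outside this diff:

/* Assumed wiring in msm_gem_prime.c (not part of this diff): the
 * drm_driver ->gem_prime_import_sg_table() hook forwards the
 * imported sg_table straight to msm_gem_import().
 */
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
		size_t size, struct sg_table *sg)
{
	return msm_gem_import(dev, size, sg);
}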