Diffstat (limited to 'drivers/gpu/drm/ttm')

 drivers/gpu/drm/ttm/Makefile           |   3
 drivers/gpu/drm/ttm/ttm_bo.c           | 545
 drivers/gpu/drm/ttm/ttm_bo_util.c      |   1
 drivers/gpu/drm/ttm/ttm_bo_vm.c        |   7
 drivers/gpu/drm/ttm/ttm_execbuf_util.c | 117
 drivers/gpu/drm/ttm/ttm_lock.c         | 311
 drivers/gpu/drm/ttm/ttm_memory.c       |  16
 drivers/gpu/drm/ttm/ttm_object.c       | 452
 drivers/gpu/drm/ttm/ttm_tt.c           |   1
 9 files changed, 1179 insertions, 274 deletions
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b0a9de7a57c2..1e138f5bae09 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | ccflags-y := -Iinclude/drm | 4 | ccflags-y := -Iinclude/drm |
5 | ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ | 5 | ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ |
6 | ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o | 6 | ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \ |
7 | ttm_object.o ttm_lock.o ttm_execbuf_util.o | ||
7 | 8 | ||
8 | obj-$(CONFIG_DRM_TTM) += ttm.o | 9 | obj-$(CONFIG_DRM_TTM) += ttm.o |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 87c06252d464..a835b6fe42a1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -27,6 +27,14 @@ | |||
27 | /* | 27 | /* |
28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | 28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> |
29 | */ | 29 | */ |
30 | /* Notes: | ||
31 | * | ||
32 | * We store bo pointer in drm_mm_node struct so we know which bo own a | ||
33 | * specific node. There is no protection on the pointer, thus to make | ||
34 | * sure things don't go berserk you have to access this pointer while | ||
35 | * holding the global lru lock and make sure anytime you free a node you | ||
36 | * reset the pointer to NULL. | ||
37 | */ | ||
30 | 38 | ||
31 | #include "ttm/ttm_module.h" | 39 | #include "ttm/ttm_module.h" |
32 | #include "ttm/ttm_bo_driver.h" | 40 | #include "ttm/ttm_bo_driver.h" |
@@ -51,6 +59,60 @@ static struct attribute ttm_bo_count = { | |||
51 | .mode = S_IRUGO | 59 | .mode = S_IRUGO |
52 | }; | 60 | }; |
53 | 61 | ||
62 | static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type) | ||
63 | { | ||
64 | int i; | ||
65 | |||
66 | for (i = 0; i <= TTM_PL_PRIV5; i++) | ||
67 | if (flags & (1 << i)) { | ||
68 | *mem_type = i; | ||
69 | return 0; | ||
70 | } | ||
71 | return -EINVAL; | ||
72 | } | ||
73 | |||
74 | static void ttm_mem_type_manager_debug(struct ttm_bo_global *glob, | ||
75 | struct ttm_mem_type_manager *man) | ||
76 | { | ||
77 | printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type); | ||
78 | printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type); | ||
79 | printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags); | ||
80 | printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset); | ||
81 | printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset); | ||
82 | printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size); | ||
83 | printk(KERN_ERR TTM_PFX " size: %ld\n", (unsigned long)man->size); | ||
84 | printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n", | ||
85 | man->available_caching); | ||
86 | printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n", | ||
87 | man->default_caching); | ||
88 | spin_lock(&glob->lru_lock); | ||
89 | drm_mm_debug_table(&man->manager, TTM_PFX); | ||
90 | spin_unlock(&glob->lru_lock); | ||
91 | } | ||
92 | |||
93 | static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, | ||
94 | struct ttm_placement *placement) | ||
95 | { | ||
96 | struct ttm_bo_device *bdev = bo->bdev; | ||
97 | struct ttm_bo_global *glob = bo->glob; | ||
98 | struct ttm_mem_type_manager *man; | ||
99 | int i, ret, mem_type; | ||
100 | |||
101 | printk(KERN_ERR TTM_PFX "No space for %p (%ld pages, %ldK, %ldM)\n", | ||
102 | bo, bo->mem.num_pages, bo->mem.size >> 10, | ||
103 | bo->mem.size >> 20); | ||
104 | for (i = 0; i < placement->num_placement; i++) { | ||
105 | ret = ttm_mem_type_from_flags(placement->placement[i], | ||
106 | &mem_type); | ||
107 | if (ret) | ||
108 | return; | ||
109 | man = &bdev->man[mem_type]; | ||
110 | printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n", | ||
111 | i, placement->placement[i], mem_type); | ||
112 | ttm_mem_type_manager_debug(glob, man); | ||
113 | } | ||
114 | } | ||
115 | |||
54 | static ssize_t ttm_bo_global_show(struct kobject *kobj, | 116 | static ssize_t ttm_bo_global_show(struct kobject *kobj, |
55 | struct attribute *attr, | 117 | struct attribute *attr, |
56 | char *buffer) | 118 | char *buffer) |
@@ -117,7 +179,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible) | |||
117 | ret = wait_event_interruptible(bo->event_queue, | 179 | ret = wait_event_interruptible(bo->event_queue, |
118 | atomic_read(&bo->reserved) == 0); | 180 | atomic_read(&bo->reserved) == 0); |
119 | if (unlikely(ret != 0)) | 181 | if (unlikely(ret != 0)) |
120 | return -ERESTART; | 182 | return ret; |
121 | } else { | 183 | } else { |
122 | wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0); | 184 | wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0); |
123 | } | 185 | } |
@@ -247,7 +309,6 @@ EXPORT_SYMBOL(ttm_bo_unreserve); | |||
247 | /* | 309 | /* |
248 | * Call bo->mutex locked. | 310 | * Call bo->mutex locked. |
249 | */ | 311 | */ |
250 | |||
251 | static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) | 312 | static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) |
252 | { | 313 | { |
253 | struct ttm_bo_device *bdev = bo->bdev; | 314 | struct ttm_bo_device *bdev = bo->bdev; |
@@ -275,9 +336,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) | |||
275 | bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, | 336 | bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, |
276 | page_flags | TTM_PAGE_FLAG_USER, | 337 | page_flags | TTM_PAGE_FLAG_USER, |
277 | glob->dummy_read_page); | 338 | glob->dummy_read_page); |
278 | if (unlikely(bo->ttm == NULL)) | 339 | if (unlikely(bo->ttm == NULL)) { |
279 | ret = -ENOMEM; | 340 | ret = -ENOMEM; |
280 | break; | 341 | break; |
342 | } | ||
281 | 343 | ||
282 | ret = ttm_tt_set_user(bo->ttm, current, | 344 | ret = ttm_tt_set_user(bo->ttm, current, |
283 | bo->buffer_start, bo->num_pages); | 345 | bo->buffer_start, bo->num_pages); |
@@ -328,14 +390,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, | |||
328 | } | 390 | } |
329 | 391 | ||
330 | if (bo->mem.mem_type == TTM_PL_SYSTEM) { | 392 | if (bo->mem.mem_type == TTM_PL_SYSTEM) { |
331 | 393 | bo->mem = *mem; | |
332 | struct ttm_mem_reg *old_mem = &bo->mem; | ||
333 | uint32_t save_flags = old_mem->placement; | ||
334 | |||
335 | *old_mem = *mem; | ||
336 | mem->mm_node = NULL; | 394 | mem->mm_node = NULL; |
337 | ttm_flag_masked(&save_flags, mem->placement, | ||
338 | TTM_PL_MASK_MEMTYPE); | ||
339 | goto moved; | 395 | goto moved; |
340 | } | 396 | } |
341 | 397 | ||
@@ -418,6 +474,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all) | |||
418 | kref_put(&bo->list_kref, ttm_bo_ref_bug); | 474 | kref_put(&bo->list_kref, ttm_bo_ref_bug); |
419 | } | 475 | } |
420 | if (bo->mem.mm_node) { | 476 | if (bo->mem.mm_node) { |
477 | bo->mem.mm_node->private = NULL; | ||
421 | drm_mm_put_block(bo->mem.mm_node); | 478 | drm_mm_put_block(bo->mem.mm_node); |
422 | bo->mem.mm_node = NULL; | 479 | bo->mem.mm_node = NULL; |
423 | } | 480 | } |
@@ -554,24 +611,21 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo) | |||
554 | } | 611 | } |
555 | EXPORT_SYMBOL(ttm_bo_unref); | 612 | EXPORT_SYMBOL(ttm_bo_unref); |
556 | 613 | ||
557 | static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type, | 614 | static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, |
558 | bool interruptible, bool no_wait) | 615 | bool no_wait) |
559 | { | 616 | { |
560 | int ret = 0; | ||
561 | struct ttm_bo_device *bdev = bo->bdev; | 617 | struct ttm_bo_device *bdev = bo->bdev; |
562 | struct ttm_bo_global *glob = bo->glob; | 618 | struct ttm_bo_global *glob = bo->glob; |
563 | struct ttm_mem_reg evict_mem; | 619 | struct ttm_mem_reg evict_mem; |
564 | uint32_t proposed_placement; | 620 | struct ttm_placement placement; |
565 | 621 | int ret = 0; | |
566 | if (bo->mem.mem_type != mem_type) | ||
567 | goto out; | ||
568 | 622 | ||
569 | spin_lock(&bo->lock); | 623 | spin_lock(&bo->lock); |
570 | ret = ttm_bo_wait(bo, false, interruptible, no_wait); | 624 | ret = ttm_bo_wait(bo, false, interruptible, no_wait); |
571 | spin_unlock(&bo->lock); | 625 | spin_unlock(&bo->lock); |
572 | 626 | ||
573 | if (unlikely(ret != 0)) { | 627 | if (unlikely(ret != 0)) { |
574 | if (ret != -ERESTART) { | 628 | if (ret != -ERESTARTSYS) { |
575 | printk(KERN_ERR TTM_PFX | 629 | printk(KERN_ERR TTM_PFX |
576 | "Failed to expire sync object before " | 630 | "Failed to expire sync object before " |
577 | "buffer eviction.\n"); | 631 | "buffer eviction.\n"); |
@@ -584,116 +638,139 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type, | |||
584 | evict_mem = bo->mem; | 638 | evict_mem = bo->mem; |
585 | evict_mem.mm_node = NULL; | 639 | evict_mem.mm_node = NULL; |
586 | 640 | ||
587 | proposed_placement = bdev->driver->evict_flags(bo); | 641 | placement.fpfn = 0; |
588 | 642 | placement.lpfn = 0; | |
589 | ret = ttm_bo_mem_space(bo, proposed_placement, | 643 | placement.num_placement = 0; |
590 | &evict_mem, interruptible, no_wait); | 644 | placement.num_busy_placement = 0; |
591 | if (unlikely(ret != 0 && ret != -ERESTART)) | 645 | bdev->driver->evict_flags(bo, &placement); |
592 | ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM, | 646 | ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible, |
593 | &evict_mem, interruptible, no_wait); | 647 | no_wait); |
594 | |||
595 | if (ret) { | 648 | if (ret) { |
596 | if (ret != -ERESTART) | 649 | if (ret != -ERESTARTSYS) { |
597 | printk(KERN_ERR TTM_PFX | 650 | printk(KERN_ERR TTM_PFX |
598 | "Failed to find memory space for " | 651 | "Failed to find memory space for " |
599 | "buffer 0x%p eviction.\n", bo); | 652 | "buffer 0x%p eviction.\n", bo); |
653 | ttm_bo_mem_space_debug(bo, &placement); | ||
654 | } | ||
600 | goto out; | 655 | goto out; |
601 | } | 656 | } |
602 | 657 | ||
603 | ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, | 658 | ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, |
604 | no_wait); | 659 | no_wait); |
605 | if (ret) { | 660 | if (ret) { |
606 | if (ret != -ERESTART) | 661 | if (ret != -ERESTARTSYS) |
607 | printk(KERN_ERR TTM_PFX "Buffer eviction failed\n"); | 662 | printk(KERN_ERR TTM_PFX "Buffer eviction failed\n"); |
663 | spin_lock(&glob->lru_lock); | ||
664 | if (evict_mem.mm_node) { | ||
665 | evict_mem.mm_node->private = NULL; | ||
666 | drm_mm_put_block(evict_mem.mm_node); | ||
667 | evict_mem.mm_node = NULL; | ||
668 | } | ||
669 | spin_unlock(&glob->lru_lock); | ||
608 | goto out; | 670 | goto out; |
609 | } | 671 | } |
672 | bo->evicted = true; | ||
673 | out: | ||
674 | return ret; | ||
675 | } | ||
676 | |||
677 | static int ttm_mem_evict_first(struct ttm_bo_device *bdev, | ||
678 | uint32_t mem_type, | ||
679 | bool interruptible, bool no_wait) | ||
680 | { | ||
681 | struct ttm_bo_global *glob = bdev->glob; | ||
682 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; | ||
683 | struct ttm_buffer_object *bo; | ||
684 | int ret, put_count = 0; | ||
610 | 685 | ||
611 | spin_lock(&glob->lru_lock); | 686 | spin_lock(&glob->lru_lock); |
612 | if (evict_mem.mm_node) { | 687 | bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru); |
613 | drm_mm_put_block(evict_mem.mm_node); | 688 | kref_get(&bo->list_kref); |
614 | evict_mem.mm_node = NULL; | 689 | ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0); |
615 | } | 690 | if (likely(ret == 0)) |
691 | put_count = ttm_bo_del_from_lru(bo); | ||
616 | spin_unlock(&glob->lru_lock); | 692 | spin_unlock(&glob->lru_lock); |
617 | bo->evicted = true; | 693 | if (unlikely(ret != 0)) |
618 | out: | 694 | return ret; |
695 | while (put_count--) | ||
696 | kref_put(&bo->list_kref, ttm_bo_ref_bug); | ||
697 | ret = ttm_bo_evict(bo, interruptible, no_wait); | ||
698 | ttm_bo_unreserve(bo); | ||
699 | kref_put(&bo->list_kref, ttm_bo_release_list); | ||
619 | return ret; | 700 | return ret; |
620 | } | 701 | } |
621 | 702 | ||
703 | static int ttm_bo_man_get_node(struct ttm_buffer_object *bo, | ||
704 | struct ttm_mem_type_manager *man, | ||
705 | struct ttm_placement *placement, | ||
706 | struct ttm_mem_reg *mem, | ||
707 | struct drm_mm_node **node) | ||
708 | { | ||
709 | struct ttm_bo_global *glob = bo->glob; | ||
710 | unsigned long lpfn; | ||
711 | int ret; | ||
712 | |||
713 | lpfn = placement->lpfn; | ||
714 | if (!lpfn) | ||
715 | lpfn = man->size; | ||
716 | *node = NULL; | ||
717 | do { | ||
718 | ret = drm_mm_pre_get(&man->manager); | ||
719 | if (unlikely(ret)) | ||
720 | return ret; | ||
721 | |||
722 | spin_lock(&glob->lru_lock); | ||
723 | *node = drm_mm_search_free_in_range(&man->manager, | ||
724 | mem->num_pages, mem->page_alignment, | ||
725 | placement->fpfn, lpfn, 1); | ||
726 | if (unlikely(*node == NULL)) { | ||
727 | spin_unlock(&glob->lru_lock); | ||
728 | return 0; | ||
729 | } | ||
730 | *node = drm_mm_get_block_atomic_range(*node, mem->num_pages, | ||
731 | mem->page_alignment, | ||
732 | placement->fpfn, | ||
733 | lpfn); | ||
734 | spin_unlock(&glob->lru_lock); | ||
735 | } while (*node == NULL); | ||
736 | return 0; | ||
737 | } | ||
738 | |||
622 | /** | 739 | /** |
623 | * Repeatedly evict memory from the LRU for @mem_type until we create enough | 740 | * Repeatedly evict memory from the LRU for @mem_type until we create enough |
624 | * space, or we've evicted everything and there isn't enough space. | 741 | * space, or we've evicted everything and there isn't enough space. |
625 | */ | 742 | */ |
626 | static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev, | 743 | static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, |
627 | struct ttm_mem_reg *mem, | 744 | uint32_t mem_type, |
628 | uint32_t mem_type, | 745 | struct ttm_placement *placement, |
629 | bool interruptible, bool no_wait) | 746 | struct ttm_mem_reg *mem, |
747 | bool interruptible, bool no_wait) | ||
630 | { | 748 | { |
749 | struct ttm_bo_device *bdev = bo->bdev; | ||
631 | struct ttm_bo_global *glob = bdev->glob; | 750 | struct ttm_bo_global *glob = bdev->glob; |
632 | struct drm_mm_node *node; | ||
633 | struct ttm_buffer_object *entry; | ||
634 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; | 751 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; |
635 | struct list_head *lru; | 752 | struct drm_mm_node *node; |
636 | unsigned long num_pages = mem->num_pages; | ||
637 | int put_count = 0; | ||
638 | int ret; | 753 | int ret; |
639 | 754 | ||
640 | retry_pre_get: | ||
641 | ret = drm_mm_pre_get(&man->manager); | ||
642 | if (unlikely(ret != 0)) | ||
643 | return ret; | ||
644 | |||
645 | spin_lock(&glob->lru_lock); | ||
646 | do { | 755 | do { |
647 | node = drm_mm_search_free(&man->manager, num_pages, | 756 | ret = ttm_bo_man_get_node(bo, man, placement, mem, &node); |
648 | mem->page_alignment, 1); | 757 | if (unlikely(ret != 0)) |
758 | return ret; | ||
649 | if (node) | 759 | if (node) |
650 | break; | 760 | break; |
651 | 761 | spin_lock(&glob->lru_lock); | |
652 | lru = &man->lru; | 762 | if (list_empty(&man->lru)) { |
653 | if (list_empty(lru)) | 763 | spin_unlock(&glob->lru_lock); |
654 | break; | 764 | break; |
655 | 765 | } | |
656 | entry = list_first_entry(lru, struct ttm_buffer_object, lru); | ||
657 | kref_get(&entry->list_kref); | ||
658 | |||
659 | ret = | ||
660 | ttm_bo_reserve_locked(entry, interruptible, no_wait, | ||
661 | false, 0); | ||
662 | |||
663 | if (likely(ret == 0)) | ||
664 | put_count = ttm_bo_del_from_lru(entry); | ||
665 | |||
666 | spin_unlock(&glob->lru_lock); | 766 | spin_unlock(&glob->lru_lock); |
667 | 767 | ret = ttm_mem_evict_first(bdev, mem_type, interruptible, | |
768 | no_wait); | ||
668 | if (unlikely(ret != 0)) | 769 | if (unlikely(ret != 0)) |
669 | return ret; | 770 | return ret; |
670 | |||
671 | while (put_count--) | ||
672 | kref_put(&entry->list_kref, ttm_bo_ref_bug); | ||
673 | |||
674 | ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait); | ||
675 | |||
676 | ttm_bo_unreserve(entry); | ||
677 | |||
678 | kref_put(&entry->list_kref, ttm_bo_release_list); | ||
679 | if (ret) | ||
680 | return ret; | ||
681 | |||
682 | spin_lock(&glob->lru_lock); | ||
683 | } while (1); | 771 | } while (1); |
684 | 772 | if (node == NULL) | |
685 | if (!node) { | ||
686 | spin_unlock(&glob->lru_lock); | ||
687 | return -ENOMEM; | 773 | return -ENOMEM; |
688 | } | ||
689 | |||
690 | node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment); | ||
691 | if (unlikely(!node)) { | ||
692 | spin_unlock(&glob->lru_lock); | ||
693 | goto retry_pre_get; | ||
694 | } | ||
695 | |||
696 | spin_unlock(&glob->lru_lock); | ||
697 | mem->mm_node = node; | 774 | mem->mm_node = node; |
698 | mem->mem_type = mem_type; | 775 | mem->mem_type = mem_type; |
699 | return 0; | 776 | return 0; |
@@ -724,7 +801,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, | |||
724 | return result; | 801 | return result; |
725 | } | 802 | } |
726 | 803 | ||
727 | |||
728 | static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, | 804 | static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, |
729 | bool disallow_fixed, | 805 | bool disallow_fixed, |
730 | uint32_t mem_type, | 806 | uint32_t mem_type, |
@@ -757,66 +833,55 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, | |||
757 | * space. | 833 | * space. |
758 | */ | 834 | */ |
759 | int ttm_bo_mem_space(struct ttm_buffer_object *bo, | 835 | int ttm_bo_mem_space(struct ttm_buffer_object *bo, |
760 | uint32_t proposed_placement, | 836 | struct ttm_placement *placement, |
761 | struct ttm_mem_reg *mem, | 837 | struct ttm_mem_reg *mem, |
762 | bool interruptible, bool no_wait) | 838 | bool interruptible, bool no_wait) |
763 | { | 839 | { |
764 | struct ttm_bo_device *bdev = bo->bdev; | 840 | struct ttm_bo_device *bdev = bo->bdev; |
765 | struct ttm_bo_global *glob = bo->glob; | ||
766 | struct ttm_mem_type_manager *man; | 841 | struct ttm_mem_type_manager *man; |
767 | |||
768 | uint32_t num_prios = bdev->driver->num_mem_type_prio; | ||
769 | const uint32_t *prios = bdev->driver->mem_type_prio; | ||
770 | uint32_t i; | ||
771 | uint32_t mem_type = TTM_PL_SYSTEM; | 842 | uint32_t mem_type = TTM_PL_SYSTEM; |
772 | uint32_t cur_flags = 0; | 843 | uint32_t cur_flags = 0; |
773 | bool type_found = false; | 844 | bool type_found = false; |
774 | bool type_ok = false; | 845 | bool type_ok = false; |
775 | bool has_eagain = false; | 846 | bool has_erestartsys = false; |
776 | struct drm_mm_node *node = NULL; | 847 | struct drm_mm_node *node = NULL; |
777 | int ret; | 848 | int i, ret; |
778 | 849 | ||
779 | mem->mm_node = NULL; | 850 | mem->mm_node = NULL; |
780 | for (i = 0; i < num_prios; ++i) { | 851 | for (i = 0; i <= placement->num_placement; ++i) { |
781 | mem_type = prios[i]; | 852 | ret = ttm_mem_type_from_flags(placement->placement[i], |
853 | &mem_type); | ||
854 | if (ret) | ||
855 | return ret; | ||
782 | man = &bdev->man[mem_type]; | 856 | man = &bdev->man[mem_type]; |
783 | 857 | ||
784 | type_ok = ttm_bo_mt_compatible(man, | 858 | type_ok = ttm_bo_mt_compatible(man, |
785 | bo->type == ttm_bo_type_user, | 859 | bo->type == ttm_bo_type_user, |
786 | mem_type, proposed_placement, | 860 | mem_type, |
787 | &cur_flags); | 861 | placement->placement[i], |
862 | &cur_flags); | ||
788 | 863 | ||
789 | if (!type_ok) | 864 | if (!type_ok) |
790 | continue; | 865 | continue; |
791 | 866 | ||
792 | cur_flags = ttm_bo_select_caching(man, bo->mem.placement, | 867 | cur_flags = ttm_bo_select_caching(man, bo->mem.placement, |
793 | cur_flags); | 868 | cur_flags); |
869 | /* | ||
870 | * Use the access and other non-mapping-related flag bits from | ||
871 | * the memory placement flags to the current flags | ||
872 | */ | ||
873 | ttm_flag_masked(&cur_flags, placement->placement[i], | ||
874 | ~TTM_PL_MASK_MEMTYPE); | ||
794 | 875 | ||
795 | if (mem_type == TTM_PL_SYSTEM) | 876 | if (mem_type == TTM_PL_SYSTEM) |
796 | break; | 877 | break; |
797 | 878 | ||
798 | if (man->has_type && man->use_type) { | 879 | if (man->has_type && man->use_type) { |
799 | type_found = true; | 880 | type_found = true; |
800 | do { | 881 | ret = ttm_bo_man_get_node(bo, man, placement, mem, |
801 | ret = drm_mm_pre_get(&man->manager); | 882 | &node); |
802 | if (unlikely(ret)) | 883 | if (unlikely(ret)) |
803 | return ret; | 884 | return ret; |
804 | |||
805 | spin_lock(&glob->lru_lock); | ||
806 | node = drm_mm_search_free(&man->manager, | ||
807 | mem->num_pages, | ||
808 | mem->page_alignment, | ||
809 | 1); | ||
810 | if (unlikely(!node)) { | ||
811 | spin_unlock(&glob->lru_lock); | ||
812 | break; | ||
813 | } | ||
814 | node = drm_mm_get_block_atomic(node, | ||
815 | mem->num_pages, | ||
816 | mem-> | ||
817 | page_alignment); | ||
818 | spin_unlock(&glob->lru_lock); | ||
819 | } while (!node); | ||
820 | } | 885 | } |
821 | if (node) | 886 | if (node) |
822 | break; | 887 | break; |
@@ -826,67 +891,65 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
826 | mem->mm_node = node; | 891 | mem->mm_node = node; |
827 | mem->mem_type = mem_type; | 892 | mem->mem_type = mem_type; |
828 | mem->placement = cur_flags; | 893 | mem->placement = cur_flags; |
894 | if (node) | ||
895 | node->private = bo; | ||
829 | return 0; | 896 | return 0; |
830 | } | 897 | } |
831 | 898 | ||
832 | if (!type_found) | 899 | if (!type_found) |
833 | return -EINVAL; | 900 | return -EINVAL; |
834 | 901 | ||
835 | num_prios = bdev->driver->num_mem_busy_prio; | 902 | for (i = 0; i <= placement->num_busy_placement; ++i) { |
836 | prios = bdev->driver->mem_busy_prio; | 903 | ret = ttm_mem_type_from_flags(placement->placement[i], |
837 | 904 | &mem_type); | |
838 | for (i = 0; i < num_prios; ++i) { | 905 | if (ret) |
839 | mem_type = prios[i]; | 906 | return ret; |
840 | man = &bdev->man[mem_type]; | 907 | man = &bdev->man[mem_type]; |
841 | |||
842 | if (!man->has_type) | 908 | if (!man->has_type) |
843 | continue; | 909 | continue; |
844 | |||
845 | if (!ttm_bo_mt_compatible(man, | 910 | if (!ttm_bo_mt_compatible(man, |
846 | bo->type == ttm_bo_type_user, | 911 | bo->type == ttm_bo_type_user, |
847 | mem_type, | 912 | mem_type, |
848 | proposed_placement, &cur_flags)) | 913 | placement->placement[i], |
914 | &cur_flags)) | ||
849 | continue; | 915 | continue; |
850 | 916 | ||
851 | cur_flags = ttm_bo_select_caching(man, bo->mem.placement, | 917 | cur_flags = ttm_bo_select_caching(man, bo->mem.placement, |
852 | cur_flags); | 918 | cur_flags); |
919 | /* | ||
920 | * Use the access and other non-mapping-related flag bits from | ||
921 | * the memory placement flags to the current flags | ||
922 | */ | ||
923 | ttm_flag_masked(&cur_flags, placement->placement[i], | ||
924 | ~TTM_PL_MASK_MEMTYPE); | ||
853 | 925 | ||
854 | ret = ttm_bo_mem_force_space(bdev, mem, mem_type, | 926 | ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, |
855 | interruptible, no_wait); | 927 | interruptible, no_wait); |
856 | |||
857 | if (ret == 0 && mem->mm_node) { | 928 | if (ret == 0 && mem->mm_node) { |
858 | mem->placement = cur_flags; | 929 | mem->placement = cur_flags; |
930 | mem->mm_node->private = bo; | ||
859 | return 0; | 931 | return 0; |
860 | } | 932 | } |
861 | 933 | if (ret == -ERESTARTSYS) | |
862 | if (ret == -ERESTART) | 934 | has_erestartsys = true; |
863 | has_eagain = true; | ||
864 | } | 935 | } |
865 | 936 | ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM; | |
866 | ret = (has_eagain) ? -ERESTART : -ENOMEM; | ||
867 | return ret; | 937 | return ret; |
868 | } | 938 | } |
869 | EXPORT_SYMBOL(ttm_bo_mem_space); | 939 | EXPORT_SYMBOL(ttm_bo_mem_space); |
870 | 940 | ||
871 | int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait) | 941 | int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait) |
872 | { | 942 | { |
873 | int ret = 0; | ||
874 | |||
875 | if ((atomic_read(&bo->cpu_writers) > 0) && no_wait) | 943 | if ((atomic_read(&bo->cpu_writers) > 0) && no_wait) |
876 | return -EBUSY; | 944 | return -EBUSY; |
877 | 945 | ||
878 | ret = wait_event_interruptible(bo->event_queue, | 946 | return wait_event_interruptible(bo->event_queue, |
879 | atomic_read(&bo->cpu_writers) == 0); | 947 | atomic_read(&bo->cpu_writers) == 0); |
880 | |||
881 | if (ret == -ERESTARTSYS) | ||
882 | ret = -ERESTART; | ||
883 | |||
884 | return ret; | ||
885 | } | 948 | } |
886 | 949 | ||
887 | int ttm_bo_move_buffer(struct ttm_buffer_object *bo, | 950 | int ttm_bo_move_buffer(struct ttm_buffer_object *bo, |
888 | uint32_t proposed_placement, | 951 | struct ttm_placement *placement, |
889 | bool interruptible, bool no_wait) | 952 | bool interruptible, bool no_wait) |
890 | { | 953 | { |
891 | struct ttm_bo_global *glob = bo->glob; | 954 | struct ttm_bo_global *glob = bo->glob; |
892 | int ret = 0; | 955 | int ret = 0; |
@@ -899,101 +962,82 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, | |||
899 | * Have the driver move function wait for idle when necessary, | 962 | * Have the driver move function wait for idle when necessary, |
900 | * instead of doing it here. | 963 | * instead of doing it here. |
901 | */ | 964 | */ |
902 | |||
903 | spin_lock(&bo->lock); | 965 | spin_lock(&bo->lock); |
904 | ret = ttm_bo_wait(bo, false, interruptible, no_wait); | 966 | ret = ttm_bo_wait(bo, false, interruptible, no_wait); |
905 | spin_unlock(&bo->lock); | 967 | spin_unlock(&bo->lock); |
906 | |||
907 | if (ret) | 968 | if (ret) |
908 | return ret; | 969 | return ret; |
909 | |||
910 | mem.num_pages = bo->num_pages; | 970 | mem.num_pages = bo->num_pages; |
911 | mem.size = mem.num_pages << PAGE_SHIFT; | 971 | mem.size = mem.num_pages << PAGE_SHIFT; |
912 | mem.page_alignment = bo->mem.page_alignment; | 972 | mem.page_alignment = bo->mem.page_alignment; |
913 | |||
914 | /* | 973 | /* |
915 | * Determine where to move the buffer. | 974 | * Determine where to move the buffer. |
916 | */ | 975 | */ |
917 | 976 | ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait); | |
918 | ret = ttm_bo_mem_space(bo, proposed_placement, &mem, | ||
919 | interruptible, no_wait); | ||
920 | if (ret) | 977 | if (ret) |
921 | goto out_unlock; | 978 | goto out_unlock; |
922 | |||
923 | ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait); | 979 | ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait); |
924 | |||
925 | out_unlock: | 980 | out_unlock: |
926 | if (ret && mem.mm_node) { | 981 | if (ret && mem.mm_node) { |
927 | spin_lock(&glob->lru_lock); | 982 | spin_lock(&glob->lru_lock); |
983 | mem.mm_node->private = NULL; | ||
928 | drm_mm_put_block(mem.mm_node); | 984 | drm_mm_put_block(mem.mm_node); |
929 | spin_unlock(&glob->lru_lock); | 985 | spin_unlock(&glob->lru_lock); |
930 | } | 986 | } |
931 | return ret; | 987 | return ret; |
932 | } | 988 | } |
933 | 989 | ||
934 | static int ttm_bo_mem_compat(uint32_t proposed_placement, | 990 | static int ttm_bo_mem_compat(struct ttm_placement *placement, |
935 | struct ttm_mem_reg *mem) | 991 | struct ttm_mem_reg *mem) |
936 | { | 992 | { |
937 | if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0) | 993 | int i; |
938 | return 0; | 994 | |
939 | if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0) | 995 | for (i = 0; i < placement->num_placement; i++) { |
940 | return 0; | 996 | if ((placement->placement[i] & mem->placement & |
941 | 997 | TTM_PL_MASK_CACHING) && | |
942 | return 1; | 998 | (placement->placement[i] & mem->placement & |
999 | TTM_PL_MASK_MEM)) | ||
1000 | return i; | ||
1001 | } | ||
1002 | return -1; | ||
943 | } | 1003 | } |
944 | 1004 | ||
945 | int ttm_buffer_object_validate(struct ttm_buffer_object *bo, | 1005 | int ttm_buffer_object_validate(struct ttm_buffer_object *bo, |
946 | uint32_t proposed_placement, | 1006 | struct ttm_placement *placement, |
947 | bool interruptible, bool no_wait) | 1007 | bool interruptible, bool no_wait) |
948 | { | 1008 | { |
949 | int ret; | 1009 | int ret; |
950 | 1010 | ||
951 | BUG_ON(!atomic_read(&bo->reserved)); | 1011 | BUG_ON(!atomic_read(&bo->reserved)); |
952 | bo->proposed_placement = proposed_placement; | 1012 | /* Check that range is valid */ |
953 | 1013 | if (placement->lpfn || placement->fpfn) | |
954 | TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n", | 1014 | if (placement->fpfn > placement->lpfn || |
955 | (unsigned long)proposed_placement, | 1015 | (placement->lpfn - placement->fpfn) < bo->num_pages) |
956 | (unsigned long)bo->mem.placement); | 1016 | return -EINVAL; |
957 | |||
958 | /* | 1017 | /* |
959 | * Check whether we need to move buffer. | 1018 | * Check whether we need to move buffer. |
960 | */ | 1019 | */ |
961 | 1020 | ret = ttm_bo_mem_compat(placement, &bo->mem); | |
962 | if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) { | 1021 | if (ret < 0) { |
963 | ret = ttm_bo_move_buffer(bo, bo->proposed_placement, | 1022 | ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait); |
964 | interruptible, no_wait); | 1023 | if (ret) |
965 | if (ret) { | ||
966 | if (ret != -ERESTART) | ||
967 | printk(KERN_ERR TTM_PFX | ||
968 | "Failed moving buffer. " | ||
969 | "Proposed placement 0x%08x\n", | ||
970 | bo->proposed_placement); | ||
971 | if (ret == -ENOMEM) | ||
972 | printk(KERN_ERR TTM_PFX | ||
973 | "Out of aperture space or " | ||
974 | "DRM memory quota.\n"); | ||
975 | return ret; | 1024 | return ret; |
976 | } | 1025 | } else { |
1026 | /* | ||
1027 | * Use the access and other non-mapping-related flag bits from | ||
1028 | * the compatible memory placement flags to the active flags | ||
1029 | */ | ||
1030 | ttm_flag_masked(&bo->mem.placement, placement->placement[ret], | ||
1031 | ~TTM_PL_MASK_MEMTYPE); | ||
977 | } | 1032 | } |
978 | |||
979 | /* | 1033 | /* |
980 | * We might need to add a TTM. | 1034 | * We might need to add a TTM. |
981 | */ | 1035 | */ |
982 | |||
983 | if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { | 1036 | if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { |
984 | ret = ttm_bo_add_ttm(bo, true); | 1037 | ret = ttm_bo_add_ttm(bo, true); |
985 | if (ret) | 1038 | if (ret) |
986 | return ret; | 1039 | return ret; |
987 | } | 1040 | } |
988 | /* | ||
989 | * Validation has succeeded, move the access and other | ||
990 | * non-mapping-related flag bits from the proposed flags to | ||
991 | * the active flags | ||
992 | */ | ||
993 | |||
994 | ttm_flag_masked(&bo->mem.placement, bo->proposed_placement, | ||
995 | ~TTM_PL_MASK_MEMTYPE); | ||
996 | |||
997 | return 0; | 1041 | return 0; |
998 | } | 1042 | } |
999 | EXPORT_SYMBOL(ttm_buffer_object_validate); | 1043 | EXPORT_SYMBOL(ttm_buffer_object_validate); |
@@ -1041,8 +1085,10 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev, | |||
1041 | size_t acc_size, | 1085 | size_t acc_size, |
1042 | void (*destroy) (struct ttm_buffer_object *)) | 1086 | void (*destroy) (struct ttm_buffer_object *)) |
1043 | { | 1087 | { |
1044 | int ret = 0; | 1088 | int i, c, ret = 0; |
1045 | unsigned long num_pages; | 1089 | unsigned long num_pages; |
1090 | uint32_t placements[8]; | ||
1091 | struct ttm_placement placement; | ||
1046 | 1092 | ||
1047 | size += buffer_start & ~PAGE_MASK; | 1093 | size += buffer_start & ~PAGE_MASK; |
1048 | num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; | 1094 | num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; |
@@ -1099,7 +1145,16 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev, | |||
1099 | goto out_err; | 1145 | goto out_err; |
1100 | } | 1146 | } |
1101 | 1147 | ||
1102 | ret = ttm_buffer_object_validate(bo, flags, interruptible, false); | 1148 | placement.fpfn = 0; |
1149 | placement.lpfn = 0; | ||
1150 | for (i = 0, c = 0; i <= TTM_PL_PRIV5; i++) | ||
1151 | if (flags & (1 << i)) | ||
1152 | placements[c++] = (flags & ~TTM_PL_MASK_MEM) | (1 << i); | ||
1153 | placement.placement = placements; | ||
1154 | placement.num_placement = c; | ||
1155 | placement.busy_placement = placements; | ||
1156 | placement.num_busy_placement = c; | ||
1157 | ret = ttm_buffer_object_validate(bo, &placement, interruptible, false); | ||
1103 | if (ret) | 1158 | if (ret) |
1104 | goto out_err; | 1159 | goto out_err; |
1105 | 1160 | ||
@@ -1134,8 +1189,8 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev, | |||
1134 | struct ttm_buffer_object **p_bo) | 1189 | struct ttm_buffer_object **p_bo) |
1135 | { | 1190 | { |
1136 | struct ttm_buffer_object *bo; | 1191 | struct ttm_buffer_object *bo; |
1137 | int ret; | ||
1138 | struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; | 1192 | struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; |
1193 | int ret; | ||
1139 | 1194 | ||
1140 | size_t acc_size = | 1195 | size_t acc_size = |
1141 | ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT); | 1196 | ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT); |
@@ -1160,66 +1215,32 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev, | |||
1160 | return ret; | 1215 | return ret; |
1161 | } | 1216 | } |
1162 | 1217 | ||
1163 | static int ttm_bo_leave_list(struct ttm_buffer_object *bo, | ||
1164 | uint32_t mem_type, bool allow_errors) | ||
1165 | { | ||
1166 | int ret; | ||
1167 | |||
1168 | spin_lock(&bo->lock); | ||
1169 | ret = ttm_bo_wait(bo, false, false, false); | ||
1170 | spin_unlock(&bo->lock); | ||
1171 | |||
1172 | if (ret && allow_errors) | ||
1173 | goto out; | ||
1174 | |||
1175 | if (bo->mem.mem_type == mem_type) | ||
1176 | ret = ttm_bo_evict(bo, mem_type, false, false); | ||
1177 | |||
1178 | if (ret) { | ||
1179 | if (allow_errors) { | ||
1180 | goto out; | ||
1181 | } else { | ||
1182 | ret = 0; | ||
1183 | printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n"); | ||
1184 | } | ||
1185 | } | ||
1186 | |||
1187 | out: | ||
1188 | return ret; | ||
1189 | } | ||
1190 | |||
1191 | static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, | 1218 | static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, |
1192 | struct list_head *head, | 1219 | unsigned mem_type, bool allow_errors) |
1193 | unsigned mem_type, bool allow_errors) | ||
1194 | { | 1220 | { |
1221 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; | ||
1195 | struct ttm_bo_global *glob = bdev->glob; | 1222 | struct ttm_bo_global *glob = bdev->glob; |
1196 | struct ttm_buffer_object *entry; | ||
1197 | int ret; | 1223 | int ret; |
1198 | int put_count; | ||
1199 | 1224 | ||
1200 | /* | 1225 | /* |
1201 | * Can't use standard list traversal since we're unlocking. | 1226 | * Can't use standard list traversal since we're unlocking. |
1202 | */ | 1227 | */ |
1203 | 1228 | ||
1204 | spin_lock(&glob->lru_lock); | 1229 | spin_lock(&glob->lru_lock); |
1205 | 1230 | while (!list_empty(&man->lru)) { | |
1206 | while (!list_empty(head)) { | ||
1207 | entry = list_first_entry(head, struct ttm_buffer_object, lru); | ||
1208 | kref_get(&entry->list_kref); | ||
1209 | ret = ttm_bo_reserve_locked(entry, false, false, false, 0); | ||
1210 | put_count = ttm_bo_del_from_lru(entry); | ||
1211 | spin_unlock(&glob->lru_lock); | 1231 | spin_unlock(&glob->lru_lock); |
1212 | while (put_count--) | 1232 | ret = ttm_mem_evict_first(bdev, mem_type, false, false); |
1213 | kref_put(&entry->list_kref, ttm_bo_ref_bug); | 1233 | if (ret) { |
1214 | BUG_ON(ret); | 1234 | if (allow_errors) { |
1215 | ret = ttm_bo_leave_list(entry, mem_type, allow_errors); | 1235 | return ret; |
1216 | ttm_bo_unreserve(entry); | 1236 | } else { |
1217 | kref_put(&entry->list_kref, ttm_bo_release_list); | 1237 | printk(KERN_ERR TTM_PFX |
1238 | "Cleanup eviction failed\n"); | ||
1239 | } | ||
1240 | } | ||
1218 | spin_lock(&glob->lru_lock); | 1241 | spin_lock(&glob->lru_lock); |
1219 | } | 1242 | } |
1220 | |||
1221 | spin_unlock(&glob->lru_lock); | 1243 | spin_unlock(&glob->lru_lock); |
1222 | |||
1223 | return 0; | 1244 | return 0; |
1224 | } | 1245 | } |
1225 | 1246 | ||
@@ -1246,7 +1267,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) | |||
1246 | 1267 | ||
1247 | ret = 0; | 1268 | ret = 0; |
1248 | if (mem_type > 0) { | 1269 | if (mem_type > 0) { |
1249 | ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false); | 1270 | ttm_bo_force_list_clean(bdev, mem_type, false); |
1250 | 1271 | ||
1251 | spin_lock(&glob->lru_lock); | 1272 | spin_lock(&glob->lru_lock); |
1252 | if (drm_mm_clean(&man->manager)) | 1273 | if (drm_mm_clean(&man->manager)) |
@@ -1279,12 +1300,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) | |||
1279 | return 0; | 1300 | return 0; |
1280 | } | 1301 | } |
1281 | 1302 | ||
1282 | return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true); | 1303 | return ttm_bo_force_list_clean(bdev, mem_type, true); |
1283 | } | 1304 | } |
1284 | EXPORT_SYMBOL(ttm_bo_evict_mm); | 1305 | EXPORT_SYMBOL(ttm_bo_evict_mm); |
1285 | 1306 | ||
1286 | int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, | 1307 | int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, |
1287 | unsigned long p_offset, unsigned long p_size) | 1308 | unsigned long p_size) |
1288 | { | 1309 | { |
1289 | int ret = -EINVAL; | 1310 | int ret = -EINVAL; |
1290 | struct ttm_mem_type_manager *man; | 1311 | struct ttm_mem_type_manager *man; |
@@ -1314,7 +1335,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, | |||
1314 | type); | 1335 | type); |
1315 | return ret; | 1336 | return ret; |
1316 | } | 1337 | } |
1317 | ret = drm_mm_init(&man->manager, p_offset, p_size); | 1338 | ret = drm_mm_init(&man->manager, 0, p_size); |
1318 | if (ret) | 1339 | if (ret) |
1319 | return ret; | 1340 | return ret; |
1320 | } | 1341 | } |
@@ -1463,7 +1484,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, | |||
1463 | * Initialize the system memory buffer type. | 1484 | * Initialize the system memory buffer type. |
1464 | * Other types need to be driver / IOCTL initialized. | 1485 | * Other types need to be driver / IOCTL initialized. |
1465 | */ | 1486 | */ |
1466 | ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0); | 1487 | ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0); |
1467 | if (unlikely(ret != 0)) | 1488 | if (unlikely(ret != 0)) |
1468 | goto out_no_sys; | 1489 | goto out_no_sys; |
1469 | 1490 | ||
@@ -1693,7 +1714,7 @@ int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible, | |||
1693 | ret = wait_event_interruptible | 1714 | ret = wait_event_interruptible |
1694 | (bo->event_queue, atomic_read(&bo->reserved) == 0); | 1715 | (bo->event_queue, atomic_read(&bo->reserved) == 0); |
1695 | if (unlikely(ret != 0)) | 1716 | if (unlikely(ret != 0)) |
1696 | return -ERESTART; | 1717 | return ret; |
1697 | } else { | 1718 | } else { |
1698 | wait_event(bo->event_queue, | 1719 | wait_event(bo->event_queue, |
1699 | atomic_read(&bo->reserved) == 0); | 1720 | atomic_read(&bo->reserved) == 0); |
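[Note] The ttm_bo.c hunks above replace the single proposed_placement flag word with a struct ttm_placement that carries an array of placement flags plus an optional fpfn/lpfn page range. The following is a minimal sketch of how a caller might build such a placement and validate an already reserved buffer; the struct fields and the ttm_buffer_object_validate() signature follow this diff, while the function name, the include paths and the TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED domain choice are illustrative assumptions, not part of the commit.

#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_placement.h"

/*
 * Hedged sketch: validate a reserved buffer into a cached VRAM-like
 * domain using the new ttm_placement interface.
 */
static int example_validate_vram(struct ttm_buffer_object *bo,
				 bool interruptible, bool no_wait)
{
	uint32_t flags[1];
	struct ttm_placement placement;

	placement.fpfn = 0;		/* 0/0 means no page-range restriction */
	placement.lpfn = 0;
	flags[0] = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED;
	placement.placement = flags;
	placement.num_placement = 1;
	placement.busy_placement = flags;	/* same list when space is tight */
	placement.num_busy_placement = 1;

	return ttm_buffer_object_validate(bo, &placement, interruptible, no_wait);
}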
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 61c5572d2b91..2ecf7d0c64f6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -369,6 +369,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp) | |||
369 | #endif | 369 | #endif |
370 | return tmp; | 370 | return tmp; |
371 | } | 371 | } |
372 | EXPORT_SYMBOL(ttm_io_prot); | ||
372 | 373 | ||
373 | static int ttm_bo_ioremap(struct ttm_buffer_object *bo, | 374 | static int ttm_bo_ioremap(struct ttm_buffer_object *bo, |
374 | unsigned long bus_base, | 375 | unsigned long bus_base, |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 1c040d040338..609a85a4d855 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -114,7 +114,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
114 | ret = ttm_bo_wait(bo, false, true, false); | 114 | ret = ttm_bo_wait(bo, false, true, false); |
115 | spin_unlock(&bo->lock); | 115 | spin_unlock(&bo->lock); |
116 | if (unlikely(ret != 0)) { | 116 | if (unlikely(ret != 0)) { |
117 | retval = (ret != -ERESTART) ? | 117 | retval = (ret != -ERESTARTSYS) ? |
118 | VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; | 118 | VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; |
119 | goto out_unlock; | 119 | goto out_unlock; |
120 | } | 120 | } |
@@ -349,9 +349,6 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, | |||
349 | switch (ret) { | 349 | switch (ret) { |
350 | case 0: | 350 | case 0: |
351 | break; | 351 | break; |
352 | case -ERESTART: | ||
353 | ret = -EINTR; | ||
354 | goto out_unref; | ||
355 | case -EBUSY: | 352 | case -EBUSY: |
356 | ret = -EAGAIN; | 353 | ret = -EAGAIN; |
357 | goto out_unref; | 354 | goto out_unref; |
@@ -421,8 +418,6 @@ ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf, | |||
421 | switch (ret) { | 418 | switch (ret) { |
422 | case 0: | 419 | case 0: |
423 | break; | 420 | break; |
424 | case -ERESTART: | ||
425 | return -EINTR; | ||
426 | case -EBUSY: | 421 | case -EBUSY: |
427 | return -EAGAIN; | 422 | return -EAGAIN; |
428 | default: | 423 | default: |
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
new file mode 100644
index 000000000000..c285c2902d15
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -0,0 +1,117 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | |||
28 | #include "ttm/ttm_execbuf_util.h" | ||
29 | #include "ttm/ttm_bo_driver.h" | ||
30 | #include "ttm/ttm_placement.h" | ||
31 | #include <linux/wait.h> | ||
32 | #include <linux/sched.h> | ||
33 | #include <linux/module.h> | ||
34 | |||
35 | void ttm_eu_backoff_reservation(struct list_head *list) | ||
36 | { | ||
37 | struct ttm_validate_buffer *entry; | ||
38 | |||
39 | list_for_each_entry(entry, list, head) { | ||
40 | struct ttm_buffer_object *bo = entry->bo; | ||
41 | if (!entry->reserved) | ||
42 | continue; | ||
43 | |||
44 | entry->reserved = false; | ||
45 | ttm_bo_unreserve(bo); | ||
46 | } | ||
47 | } | ||
48 | EXPORT_SYMBOL(ttm_eu_backoff_reservation); | ||
49 | |||
50 | /* | ||
51 | * Reserve buffers for validation. | ||
52 | * | ||
53 | * If a buffer in the list is marked for CPU access, we back off and | ||
54 | * wait for that buffer to become free for GPU access. | ||
55 | * | ||
56 | * If a buffer is reserved for another validation, the validator with | ||
57 | * the highest validation sequence backs off and waits for that buffer | ||
58 | * to become unreserved. This prevents deadlocks when validating multiple | ||
59 | * buffers in different orders. | ||
60 | */ | ||
61 | |||
62 | int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq) | ||
63 | { | ||
64 | struct ttm_validate_buffer *entry; | ||
65 | int ret; | ||
66 | |||
67 | retry: | ||
68 | list_for_each_entry(entry, list, head) { | ||
69 | struct ttm_buffer_object *bo = entry->bo; | ||
70 | |||
71 | entry->reserved = false; | ||
72 | ret = ttm_bo_reserve(bo, true, false, true, val_seq); | ||
73 | if (ret != 0) { | ||
74 | ttm_eu_backoff_reservation(list); | ||
75 | if (ret == -EAGAIN) { | ||
76 | ret = ttm_bo_wait_unreserved(bo, true); | ||
77 | if (unlikely(ret != 0)) | ||
78 | return ret; | ||
79 | goto retry; | ||
80 | } else | ||
81 | return ret; | ||
82 | } | ||
83 | |||
84 | entry->reserved = true; | ||
85 | if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { | ||
86 | ttm_eu_backoff_reservation(list); | ||
87 | ret = ttm_bo_wait_cpu(bo, false); | ||
88 | if (ret) | ||
89 | return ret; | ||
90 | goto retry; | ||
91 | } | ||
92 | } | ||
93 | return 0; | ||
94 | } | ||
95 | EXPORT_SYMBOL(ttm_eu_reserve_buffers); | ||
96 | |||
97 | void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj) | ||
98 | { | ||
99 | struct ttm_validate_buffer *entry; | ||
100 | |||
101 | list_for_each_entry(entry, list, head) { | ||
102 | struct ttm_buffer_object *bo = entry->bo; | ||
103 | struct ttm_bo_driver *driver = bo->bdev->driver; | ||
104 | void *old_sync_obj; | ||
105 | |||
106 | spin_lock(&bo->lock); | ||
107 | old_sync_obj = bo->sync_obj; | ||
108 | bo->sync_obj = driver->sync_obj_ref(sync_obj); | ||
109 | bo->sync_obj_arg = entry->new_sync_obj_arg; | ||
110 | spin_unlock(&bo->lock); | ||
111 | ttm_bo_unreserve(bo); | ||
112 | entry->reserved = false; | ||
113 | if (old_sync_obj) | ||
114 | driver->sync_obj_unref(&old_sync_obj); | ||
115 | } | ||
116 | } | ||
117 | EXPORT_SYMBOL(ttm_eu_fence_buffer_objects); | ||
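[Note] The new ttm_execbuf_util.c above provides list-based reservation helpers for command submission. Below is a hedged sketch of the intended reserve -> validate -> submit -> fence flow; the ttm_eu_* calls and the ttm_validate_buffer fields come from this file, while the driver-side helpers and names are hypothetical placeholders. The caller is assumed to have filled each entry's bo and new_sync_obj_arg before submission.

#include <linux/list.h>
#include "ttm/ttm_execbuf_util.h"

/* Driver-specific steps, hypothetical for this sketch. */
static int example_validate_one(struct ttm_buffer_object *bo);
static int example_fire_ring(void);

static int example_submit(struct list_head *val_list, uint32_t val_seq,
			  void *fence_sync_obj)
{
	struct ttm_validate_buffer *entry;
	int ret;

	/* Reserve every buffer on the list; val_seq resolves ordering conflicts. */
	ret = ttm_eu_reserve_buffers(val_list, val_seq);
	if (ret)
		return ret;

	list_for_each_entry(entry, val_list, head) {
		ret = example_validate_one(entry->bo);
		if (ret)
			goto out_backoff;
	}

	ret = example_fire_ring();
	if (ret)
		goto out_backoff;

	/* Attach the sync object to all buffers and unreserve them. */
	ttm_eu_fence_buffer_objects(val_list, fence_sync_obj);
	return 0;

out_backoff:
	ttm_eu_backoff_reservation(val_list);
	return ret;
}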
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
new file mode 100644
index 000000000000..f619ebcaa4ec
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -0,0 +1,311 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | /* | ||
28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | ||
29 | */ | ||
30 | |||
31 | #include "ttm/ttm_lock.h" | ||
32 | #include "ttm/ttm_module.h" | ||
33 | #include <asm/atomic.h> | ||
34 | #include <linux/errno.h> | ||
35 | #include <linux/wait.h> | ||
36 | #include <linux/sched.h> | ||
37 | #include <linux/module.h> | ||
38 | |||
39 | #define TTM_WRITE_LOCK_PENDING (1 << 0) | ||
40 | #define TTM_VT_LOCK_PENDING (1 << 1) | ||
41 | #define TTM_SUSPEND_LOCK_PENDING (1 << 2) | ||
42 | #define TTM_VT_LOCK (1 << 3) | ||
43 | #define TTM_SUSPEND_LOCK (1 << 4) | ||
44 | |||
45 | void ttm_lock_init(struct ttm_lock *lock) | ||
46 | { | ||
47 | spin_lock_init(&lock->lock); | ||
48 | init_waitqueue_head(&lock->queue); | ||
49 | lock->rw = 0; | ||
50 | lock->flags = 0; | ||
51 | lock->kill_takers = false; | ||
52 | lock->signal = SIGKILL; | ||
53 | } | ||
54 | EXPORT_SYMBOL(ttm_lock_init); | ||
55 | |||
56 | void ttm_read_unlock(struct ttm_lock *lock) | ||
57 | { | ||
58 | spin_lock(&lock->lock); | ||
59 | if (--lock->rw == 0) | ||
60 | wake_up_all(&lock->queue); | ||
61 | spin_unlock(&lock->lock); | ||
62 | } | ||
63 | EXPORT_SYMBOL(ttm_read_unlock); | ||
64 | |||
65 | static bool __ttm_read_lock(struct ttm_lock *lock) | ||
66 | { | ||
67 | bool locked = false; | ||
68 | |||
69 | spin_lock(&lock->lock); | ||
70 | if (unlikely(lock->kill_takers)) { | ||
71 | send_sig(lock->signal, current, 0); | ||
72 | spin_unlock(&lock->lock); | ||
73 | return false; | ||
74 | } | ||
75 | if (lock->rw >= 0 && lock->flags == 0) { | ||
76 | ++lock->rw; | ||
77 | locked = true; | ||
78 | } | ||
79 | spin_unlock(&lock->lock); | ||
80 | return locked; | ||
81 | } | ||
82 | |||
83 | int ttm_read_lock(struct ttm_lock *lock, bool interruptible) | ||
84 | { | ||
85 | int ret = 0; | ||
86 | |||
87 | if (interruptible) | ||
88 | ret = wait_event_interruptible(lock->queue, | ||
89 | __ttm_read_lock(lock)); | ||
90 | else | ||
91 | wait_event(lock->queue, __ttm_read_lock(lock)); | ||
92 | return ret; | ||
93 | } | ||
94 | EXPORT_SYMBOL(ttm_read_lock); | ||
95 | |||
96 | static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked) | ||
97 | { | ||
98 | bool block = true; | ||
99 | |||
100 | *locked = false; | ||
101 | |||
102 | spin_lock(&lock->lock); | ||
103 | if (unlikely(lock->kill_takers)) { | ||
104 | send_sig(lock->signal, current, 0); | ||
105 | spin_unlock(&lock->lock); | ||
106 | return false; | ||
107 | } | ||
108 | if (lock->rw >= 0 && lock->flags == 0) { | ||
109 | ++lock->rw; | ||
110 | block = false; | ||
111 | *locked = true; | ||
112 | } else if (lock->flags == 0) { | ||
113 | block = false; | ||
114 | } | ||
115 | spin_unlock(&lock->lock); | ||
116 | |||
117 | return !block; | ||
118 | } | ||
119 | |||
120 | int ttm_read_trylock(struct ttm_lock *lock, bool interruptible) | ||
121 | { | ||
122 | int ret = 0; | ||
123 | bool locked; | ||
124 | |||
125 | if (interruptible) | ||
126 | ret = wait_event_interruptible | ||
127 | (lock->queue, __ttm_read_trylock(lock, &locked)); | ||
128 | else | ||
129 | wait_event(lock->queue, __ttm_read_trylock(lock, &locked)); | ||
130 | |||
131 | if (unlikely(ret != 0)) { | ||
132 | BUG_ON(locked); | ||
133 | return ret; | ||
134 | } | ||
135 | |||
136 | return (locked) ? 0 : -EBUSY; | ||
137 | } | ||
138 | |||
139 | void ttm_write_unlock(struct ttm_lock *lock) | ||
140 | { | ||
141 | spin_lock(&lock->lock); | ||
142 | lock->rw = 0; | ||
143 | wake_up_all(&lock->queue); | ||
144 | spin_unlock(&lock->lock); | ||
145 | } | ||
146 | EXPORT_SYMBOL(ttm_write_unlock); | ||
147 | |||
148 | static bool __ttm_write_lock(struct ttm_lock *lock) | ||
149 | { | ||
150 | bool locked = false; | ||
151 | |||
152 | spin_lock(&lock->lock); | ||
153 | if (unlikely(lock->kill_takers)) { | ||
154 | send_sig(lock->signal, current, 0); | ||
155 | spin_unlock(&lock->lock); | ||
156 | return false; | ||
157 | } | ||
158 | if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) { | ||
159 | lock->rw = -1; | ||
160 | lock->flags &= ~TTM_WRITE_LOCK_PENDING; | ||
161 | locked = true; | ||
162 | } else { | ||
163 | lock->flags |= TTM_WRITE_LOCK_PENDING; | ||
164 | } | ||
165 | spin_unlock(&lock->lock); | ||
166 | return locked; | ||
167 | } | ||
168 | |||
169 | int ttm_write_lock(struct ttm_lock *lock, bool interruptible) | ||
170 | { | ||
171 | int ret = 0; | ||
172 | |||
173 | if (interruptible) { | ||
174 | ret = wait_event_interruptible(lock->queue, | ||
175 | __ttm_write_lock(lock)); | ||
176 | if (unlikely(ret != 0)) { | ||
177 | spin_lock(&lock->lock); | ||
178 | lock->flags &= ~TTM_WRITE_LOCK_PENDING; | ||
179 | wake_up_all(&lock->queue); | ||
180 | spin_unlock(&lock->lock); | ||
181 | } | ||
182 | } else | ||
183 | wait_event(lock->queue, __ttm_read_lock(lock)); | ||
184 | |||
185 | return ret; | ||
186 | } | ||
187 | EXPORT_SYMBOL(ttm_write_lock); | ||
188 | |||
189 | void ttm_write_lock_downgrade(struct ttm_lock *lock) | ||
190 | { | ||
191 | spin_lock(&lock->lock); | ||
192 | lock->rw = 1; | ||
193 | wake_up_all(&lock->queue); | ||
194 | spin_unlock(&lock->lock); | ||
195 | } | ||
196 | |||
197 | static int __ttm_vt_unlock(struct ttm_lock *lock) | ||
198 | { | ||
199 | int ret = 0; | ||
200 | |||
201 | spin_lock(&lock->lock); | ||
202 | if (unlikely(!(lock->flags & TTM_VT_LOCK))) | ||
203 | ret = -EINVAL; | ||
204 | lock->flags &= ~TTM_VT_LOCK; | ||
205 | wake_up_all(&lock->queue); | ||
206 | spin_unlock(&lock->lock); | ||
207 | printk(KERN_INFO TTM_PFX "vt unlock.\n"); | ||
208 | |||
209 | return ret; | ||
210 | } | ||
211 | |||
212 | static void ttm_vt_lock_remove(struct ttm_base_object **p_base) | ||
213 | { | ||
214 | struct ttm_base_object *base = *p_base; | ||
215 | struct ttm_lock *lock = container_of(base, struct ttm_lock, base); | ||
216 | int ret; | ||
217 | |||
218 | *p_base = NULL; | ||
219 | ret = __ttm_vt_unlock(lock); | ||
220 | BUG_ON(ret != 0); | ||
221 | } | ||
222 | |||
223 | static bool __ttm_vt_lock(struct ttm_lock *lock) | ||
224 | { | ||
225 | bool locked = false; | ||
226 | |||
227 | spin_lock(&lock->lock); | ||
228 | if (lock->rw == 0) { | ||
229 | lock->flags &= ~TTM_VT_LOCK_PENDING; | ||
230 | lock->flags |= TTM_VT_LOCK; | ||
231 | locked = true; | ||
232 | } else { | ||
233 | lock->flags |= TTM_VT_LOCK_PENDING; | ||
234 | } | ||
235 | spin_unlock(&lock->lock); | ||
236 | return locked; | ||
237 | } | ||
238 | |||
239 | int ttm_vt_lock(struct ttm_lock *lock, | ||
240 | bool interruptible, | ||
241 | struct ttm_object_file *tfile) | ||
242 | { | ||
243 | int ret = 0; | ||
244 | |||
245 | if (interruptible) { | ||
246 | ret = wait_event_interruptible(lock->queue, | ||
247 | __ttm_vt_lock(lock)); | ||
248 | if (unlikely(ret != 0)) { | ||
249 | spin_lock(&lock->lock); | ||
250 | lock->flags &= ~TTM_VT_LOCK_PENDING; | ||
251 | wake_up_all(&lock->queue); | ||
252 | spin_unlock(&lock->lock); | ||
253 | return ret; | ||
254 | } | ||
255 | } else | ||
256 | wait_event(lock->queue, __ttm_vt_lock(lock)); | ||
257 | |||
258 | /* | ||
259 | * Add a base-object, the destructor of which will | ||
260 | * make sure the lock is released if the client dies | ||
261 | * while holding it. | ||
262 | */ | ||
263 | |||
264 | ret = ttm_base_object_init(tfile, &lock->base, false, | ||
265 | ttm_lock_type, &ttm_vt_lock_remove, NULL); | ||
266 | if (ret) | ||
267 | (void)__ttm_vt_unlock(lock); | ||
268 | else { | ||
269 | lock->vt_holder = tfile; | ||
270 | printk(KERN_INFO TTM_PFX "vt lock.\n"); | ||
271 | } | ||
272 | |||
273 | return ret; | ||
274 | } | ||
275 | EXPORT_SYMBOL(ttm_vt_lock); | ||
276 | |||
277 | int ttm_vt_unlock(struct ttm_lock *lock) | ||
278 | { | ||
279 | return ttm_ref_object_base_unref(lock->vt_holder, | ||
280 | lock->base.hash.key, TTM_REF_USAGE); | ||
281 | } | ||
282 | EXPORT_SYMBOL(ttm_vt_unlock); | ||
283 | |||
284 | void ttm_suspend_unlock(struct ttm_lock *lock) | ||
285 | { | ||
286 | spin_lock(&lock->lock); | ||
287 | lock->flags &= ~TTM_SUSPEND_LOCK; | ||
288 | wake_up_all(&lock->queue); | ||
289 | spin_unlock(&lock->lock); | ||
290 | } | ||
291 | |||
292 | static bool __ttm_suspend_lock(struct ttm_lock *lock) | ||
293 | { | ||
294 | bool locked = false; | ||
295 | |||
296 | spin_lock(&lock->lock); | ||
297 | if (lock->rw == 0) { | ||
298 | lock->flags &= ~TTM_SUSPEND_LOCK_PENDING; | ||
299 | lock->flags |= TTM_SUSPEND_LOCK; | ||
300 | locked = true; | ||
301 | } else { | ||
302 | lock->flags |= TTM_SUSPEND_LOCK_PENDING; | ||
303 | } | ||
304 | spin_unlock(&lock->lock); | ||
305 | return locked; | ||
306 | } | ||
307 | |||
308 | void ttm_suspend_lock(struct ttm_lock *lock) | ||
309 | { | ||
310 | wait_event(lock->queue, __ttm_suspend_lock(lock)); | ||
311 | } | ||
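For context, a minimal driver-side sketch of how the VT lock added above is meant to be driven. Everything prefixed "my_" is hypothetical and not part of this patch, and the header path is an assumption; only ttm_vt_lock()/ttm_vt_unlock() and their behaviour are taken from the code shown here.

#include "ttm/ttm_lock.h"	/* assumed location of the new lock header */
#include "ttm/ttm_object.h"

static int my_master_set(struct ttm_lock *lock,
			 struct ttm_object_file *tfile)
{
	int ret;

	/* Sleeps until lock->rw == 0, i.e. all readers and writers are gone. */
	ret = ttm_vt_lock(lock, true, tfile);
	if (ret != 0)
		return ret;	/* interrupted while TTM_VT_LOCK_PENDING was set */

	/* ... exclusive work: take over the device, evict buffers, ... */

	/*
	 * Drops the TTM_REF_USAGE reference created inside ttm_vt_lock().
	 * If the client dies instead, ttm_vt_lock_remove() runs on file
	 * release and calls __ttm_vt_unlock() on its behalf.
	 */
	return ttm_vt_unlock(lock);
}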
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index 072c281a6bb5..f5245c02b8fd 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c | |||
@@ -274,16 +274,17 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob, | |||
274 | static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob, | 274 | static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob, |
275 | const struct sysinfo *si) | 275 | const struct sysinfo *si) |
276 | { | 276 | { |
277 | struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL); | 277 | struct ttm_mem_zone *zone; |
278 | uint64_t mem; | 278 | uint64_t mem; |
279 | int ret; | 279 | int ret; |
280 | 280 | ||
281 | if (unlikely(!zone)) | ||
282 | return -ENOMEM; | ||
283 | |||
284 | if (si->totalhigh == 0) | 281 | if (si->totalhigh == 0) |
285 | return 0; | 282 | return 0; |
286 | 283 | ||
284 | zone = kzalloc(sizeof(*zone), GFP_KERNEL); | ||
285 | if (unlikely(!zone)) | ||
286 | return -ENOMEM; | ||
287 | |||
287 | mem = si->totalram; | 288 | mem = si->totalram; |
288 | mem *= si->mem_unit; | 289 | mem *= si->mem_unit; |
289 | 290 | ||
@@ -322,8 +323,10 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob, | |||
322 | * No special dma32 zone needed. | 323 | * No special dma32 zone needed. |
323 | */ | 324 | */ |
324 | 325 | ||
325 | if (mem <= ((uint64_t) 1ULL << 32)) | 326 | if (mem <= ((uint64_t) 1ULL << 32)) { |
327 | kfree(zone); | ||
326 | return 0; | 328 | return 0; |
329 | } | ||
327 | 330 | ||
328 | /* | 331 | /* |
329 | * Limit max dma32 memory to 4GB for now | 332 | * Limit max dma32 memory to 4GB for now |
@@ -460,6 +463,7 @@ void ttm_mem_global_free(struct ttm_mem_global *glob, | |||
460 | { | 463 | { |
461 | return ttm_mem_global_free_zone(glob, NULL, amount); | 464 | return ttm_mem_global_free_zone(glob, NULL, amount); |
462 | } | 465 | } |
466 | EXPORT_SYMBOL(ttm_mem_global_free); | ||
463 | 467 | ||
464 | static int ttm_mem_global_reserve(struct ttm_mem_global *glob, | 468 | static int ttm_mem_global_reserve(struct ttm_mem_global *glob, |
465 | struct ttm_mem_zone *single_zone, | 469 | struct ttm_mem_zone *single_zone, |
@@ -533,6 +537,7 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, | |||
533 | return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait, | 537 | return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait, |
534 | interruptible); | 538 | interruptible); |
535 | } | 539 | } |
540 | EXPORT_SYMBOL(ttm_mem_global_alloc); | ||
536 | 541 | ||
537 | int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, | 542 | int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, |
538 | struct page *page, | 543 | struct page *page, |
@@ -588,3 +593,4 @@ size_t ttm_round_pot(size_t size) | |||
588 | } | 593 | } |
589 | return 0; | 594 | return 0; |
590 | } | 595 | } |
596 | EXPORT_SYMBOL(ttm_round_pot); | ||
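The EXPORT_SYMBOL additions above let code outside ttm_memory.c (including the new ttm_object.c below) account its allocations against the global memory zones. A hedged sketch of that accounting pattern, mirroring what ttm_ref_object_add() does further down; "struct my_obj" and the header path are assumptions, not part of this patch.

#include "ttm/ttm_memory.h"	/* assumed header for ttm_mem_global_* */
#include <linux/slab.h>

struct my_obj {
	int payload;		/* placeholder driver data */
};

static struct my_obj *my_obj_alloc(struct ttm_mem_global *mem_glob)
{
	struct my_obj *obj;
	int ret;

	/* Reserve the space in the accounting zones first; may sleep. */
	ret = ttm_mem_global_alloc(mem_glob, sizeof(*obj), false, false);
	if (ret != 0)
		return NULL;

	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL) {
		/* Hand the accounted amount back on failure. */
		ttm_mem_global_free(mem_glob, sizeof(*obj));
		return NULL;
	}
	return obj;
}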
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c new file mode 100644 index 000000000000..1099abac824b --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_object.c | |||
@@ -0,0 +1,452 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | /* | ||
28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | ||
29 | */ | ||
30 | /** @file ttm_object.c | ||
31 | * | ||
32 | * Base- and reference object implementation for the various | ||
33 | * ttm objects. Implements reference counting, minimal security checks | ||
34 | * and release on file close. | ||
35 | */ | ||
36 | |||
37 | /** | ||
38 | * struct ttm_object_file | ||
39 | * | ||
40 | * @tdev: Pointer to the ttm_object_device. | ||
41 | * | ||
42 | * @lock: Lock that protects the ref_list list and the | ||
43 | * ref_hash hash tables. | ||
44 | * | ||
45 | * @ref_list: List of ttm_ref_objects to be destroyed at | ||
46 | * file release. | ||
47 | * | ||
48 | * @ref_hash: Hash tables of ref objects, one per ttm_ref_type, | ||
49 | * for fast lookup of ref objects given a base object. | ||
50 | */ | ||
51 | |||
52 | #include "ttm/ttm_object.h" | ||
53 | #include "ttm/ttm_module.h" | ||
54 | #include <linux/list.h> | ||
55 | #include <linux/spinlock.h> | ||
56 | #include <linux/slab.h> | ||
57 | #include <linux/module.h> | ||
58 | #include <asm/atomic.h> | ||
59 | |||
60 | struct ttm_object_file { | ||
61 | struct ttm_object_device *tdev; | ||
62 | rwlock_t lock; | ||
63 | struct list_head ref_list; | ||
64 | struct drm_open_hash ref_hash[TTM_REF_NUM]; | ||
65 | struct kref refcount; | ||
66 | }; | ||
67 | |||
68 | /** | ||
69 | * struct ttm_object_device | ||
70 | * | ||
71 | * @object_lock: lock that protects the object_hash hash table. | ||
72 | * | ||
73 | * @object_hash: hash table for fast lookup of object global names. | ||
74 | * | ||
75 | * @object_count: Per device object count. | ||
76 | * | ||
77 | * This is the per-device data structure needed for ttm object management. | ||
78 | */ | ||
79 | |||
80 | struct ttm_object_device { | ||
81 | rwlock_t object_lock; | ||
82 | struct drm_open_hash object_hash; | ||
83 | atomic_t object_count; | ||
84 | struct ttm_mem_global *mem_glob; | ||
85 | }; | ||
86 | |||
87 | /** | ||
88 | * struct ttm_ref_object | ||
89 | * | ||
90 | * @hash: Hash entry for the per-file object reference hash. | ||
91 | * | ||
92 | * @head: List entry for the per-file list of ref-objects. | ||
93 | * | ||
94 | * @kref: Ref count. | ||
95 | * | ||
96 | * @obj: Base object this ref object is referencing. | ||
97 | * | ||
98 | * @ref_type: Type of ref object. | ||
99 | * | ||
100 | * This is similar to an idr object, but it also has a hash table entry | ||
101 | * that allows lookup with a pointer to the referenced object as a key. In | ||
102 | * that way, one can easily detect whether a base object is referenced by | ||
103 | * a particular ttm_object_file. It also carries a ref count to avoid creating | ||
104 | * multiple ref objects if a ttm_object_file references the same base | ||
105 | * object more than once. | ||
106 | */ | ||
107 | |||
108 | struct ttm_ref_object { | ||
109 | struct drm_hash_item hash; | ||
110 | struct list_head head; | ||
111 | struct kref kref; | ||
112 | struct ttm_base_object *obj; | ||
113 | enum ttm_ref_type ref_type; | ||
114 | struct ttm_object_file *tfile; | ||
115 | }; | ||
116 | |||
117 | static inline struct ttm_object_file * | ||
118 | ttm_object_file_ref(struct ttm_object_file *tfile) | ||
119 | { | ||
120 | kref_get(&tfile->refcount); | ||
121 | return tfile; | ||
122 | } | ||
123 | |||
124 | static void ttm_object_file_destroy(struct kref *kref) | ||
125 | { | ||
126 | struct ttm_object_file *tfile = | ||
127 | container_of(kref, struct ttm_object_file, refcount); | ||
128 | |||
129 | kfree(tfile); | ||
130 | } | ||
131 | |||
132 | |||
133 | static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile) | ||
134 | { | ||
135 | struct ttm_object_file *tfile = *p_tfile; | ||
136 | |||
137 | *p_tfile = NULL; | ||
138 | kref_put(&tfile->refcount, ttm_object_file_destroy); | ||
139 | } | ||
140 | |||
141 | |||
142 | int ttm_base_object_init(struct ttm_object_file *tfile, | ||
143 | struct ttm_base_object *base, | ||
144 | bool shareable, | ||
145 | enum ttm_object_type object_type, | ||
146 | void (*refcount_release) (struct ttm_base_object **), | ||
147 | void (*ref_obj_release) (struct ttm_base_object *, | ||
148 | enum ttm_ref_type ref_type)) | ||
149 | { | ||
150 | struct ttm_object_device *tdev = tfile->tdev; | ||
151 | int ret; | ||
152 | |||
153 | base->shareable = shareable; | ||
154 | base->tfile = ttm_object_file_ref(tfile); | ||
155 | base->refcount_release = refcount_release; | ||
156 | base->ref_obj_release = ref_obj_release; | ||
157 | base->object_type = object_type; | ||
158 | write_lock(&tdev->object_lock); | ||
159 | kref_init(&base->refcount); | ||
160 | ret = drm_ht_just_insert_please(&tdev->object_hash, | ||
161 | &base->hash, | ||
162 | (unsigned long)base, 31, 0, 0); | ||
163 | write_unlock(&tdev->object_lock); | ||
164 | if (unlikely(ret != 0)) | ||
165 | goto out_err0; | ||
166 | |||
167 | ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); | ||
168 | if (unlikely(ret != 0)) | ||
169 | goto out_err1; | ||
170 | |||
171 | ttm_base_object_unref(&base); | ||
172 | |||
173 | return 0; | ||
174 | out_err1: | ||
175 | (void)drm_ht_remove_item(&tdev->object_hash, &base->hash); | ||
176 | out_err0: | ||
177 | return ret; | ||
178 | } | ||
179 | EXPORT_SYMBOL(ttm_base_object_init); | ||
180 | |||
181 | static void ttm_release_base(struct kref *kref) | ||
182 | { | ||
183 | struct ttm_base_object *base = | ||
184 | container_of(kref, struct ttm_base_object, refcount); | ||
185 | struct ttm_object_device *tdev = base->tfile->tdev; | ||
186 | |||
187 | (void)drm_ht_remove_item(&tdev->object_hash, &base->hash); | ||
188 | write_unlock(&tdev->object_lock); | ||
189 | if (base->refcount_release) { | ||
190 | ttm_object_file_unref(&base->tfile); | ||
191 | base->refcount_release(&base); | ||
192 | } | ||
193 | write_lock(&tdev->object_lock); | ||
194 | } | ||
195 | |||
196 | void ttm_base_object_unref(struct ttm_base_object **p_base) | ||
197 | { | ||
198 | struct ttm_base_object *base = *p_base; | ||
199 | struct ttm_object_device *tdev = base->tfile->tdev; | ||
200 | |||
201 | *p_base = NULL; | ||
202 | |||
203 | /* | ||
204 | * Need to take the lock here to avoid racing with | ||
205 | * users trying to look up the object. | ||
206 | */ | ||
207 | |||
208 | write_lock(&tdev->object_lock); | ||
209 | (void)kref_put(&base->refcount, &ttm_release_base); | ||
210 | write_unlock(&tdev->object_lock); | ||
211 | } | ||
212 | EXPORT_SYMBOL(ttm_base_object_unref); | ||
213 | |||
214 | struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, | ||
215 | uint32_t key) | ||
216 | { | ||
217 | struct ttm_object_device *tdev = tfile->tdev; | ||
218 | struct ttm_base_object *base; | ||
219 | struct drm_hash_item *hash; | ||
220 | int ret; | ||
221 | |||
222 | read_lock(&tdev->object_lock); | ||
223 | ret = drm_ht_find_item(&tdev->object_hash, key, &hash); | ||
224 | |||
225 | if (likely(ret == 0)) { | ||
226 | base = drm_hash_entry(hash, struct ttm_base_object, hash); | ||
227 | kref_get(&base->refcount); | ||
228 | } | ||
229 | read_unlock(&tdev->object_lock); | ||
230 | |||
231 | if (unlikely(ret != 0)) | ||
232 | return NULL; | ||
233 | |||
234 | if (tfile != base->tfile && !base->shareable) { | ||
235 | printk(KERN_ERR TTM_PFX | ||
236 | "Attempted access of non-shareable object.\n"); | ||
237 | ttm_base_object_unref(&base); | ||
238 | return NULL; | ||
239 | } | ||
240 | |||
241 | return base; | ||
242 | } | ||
243 | EXPORT_SYMBOL(ttm_base_object_lookup); | ||
244 | |||
245 | int ttm_ref_object_add(struct ttm_object_file *tfile, | ||
246 | struct ttm_base_object *base, | ||
247 | enum ttm_ref_type ref_type, bool *existed) | ||
248 | { | ||
249 | struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; | ||
250 | struct ttm_ref_object *ref; | ||
251 | struct drm_hash_item *hash; | ||
252 | struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; | ||
253 | int ret = -EINVAL; | ||
254 | |||
255 | if (existed != NULL) | ||
256 | *existed = true; | ||
257 | |||
258 | while (ret == -EINVAL) { | ||
259 | read_lock(&tfile->lock); | ||
260 | ret = drm_ht_find_item(ht, base->hash.key, &hash); | ||
261 | |||
262 | if (ret == 0) { | ||
263 | ref = drm_hash_entry(hash, struct ttm_ref_object, hash); | ||
264 | kref_get(&ref->kref); | ||
265 | read_unlock(&tfile->lock); | ||
266 | break; | ||
267 | } | ||
268 | |||
269 | read_unlock(&tfile->lock); | ||
270 | ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), | ||
271 | false, false); | ||
272 | if (unlikely(ret != 0)) | ||
273 | return ret; | ||
274 | ref = kmalloc(sizeof(*ref), GFP_KERNEL); | ||
275 | if (unlikely(ref == NULL)) { | ||
276 | ttm_mem_global_free(mem_glob, sizeof(*ref)); | ||
277 | return -ENOMEM; | ||
278 | } | ||
279 | |||
280 | ref->hash.key = base->hash.key; | ||
281 | ref->obj = base; | ||
282 | ref->tfile = tfile; | ||
283 | ref->ref_type = ref_type; | ||
284 | kref_init(&ref->kref); | ||
285 | |||
286 | write_lock(&tfile->lock); | ||
287 | ret = drm_ht_insert_item(ht, &ref->hash); | ||
288 | |||
289 | if (likely(ret == 0)) { | ||
290 | list_add_tail(&ref->head, &tfile->ref_list); | ||
291 | kref_get(&base->refcount); | ||
292 | write_unlock(&tfile->lock); | ||
293 | if (existed != NULL) | ||
294 | *existed = false; | ||
295 | break; | ||
296 | } | ||
297 | |||
298 | write_unlock(&tfile->lock); | ||
299 | BUG_ON(ret != -EINVAL); | ||
300 | |||
301 | ttm_mem_global_free(mem_glob, sizeof(*ref)); | ||
302 | kfree(ref); | ||
303 | } | ||
304 | |||
305 | return ret; | ||
306 | } | ||
307 | EXPORT_SYMBOL(ttm_ref_object_add); | ||
308 | |||
309 | static void ttm_ref_object_release(struct kref *kref) | ||
310 | { | ||
311 | struct ttm_ref_object *ref = | ||
312 | container_of(kref, struct ttm_ref_object, kref); | ||
313 | struct ttm_base_object *base = ref->obj; | ||
314 | struct ttm_object_file *tfile = ref->tfile; | ||
315 | struct drm_open_hash *ht; | ||
316 | struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; | ||
317 | |||
318 | ht = &tfile->ref_hash[ref->ref_type]; | ||
319 | (void)drm_ht_remove_item(ht, &ref->hash); | ||
320 | list_del(&ref->head); | ||
321 | write_unlock(&tfile->lock); | ||
322 | |||
323 | if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release) | ||
324 | base->ref_obj_release(base, ref->ref_type); | ||
325 | |||
326 | ttm_base_object_unref(&ref->obj); | ||
327 | ttm_mem_global_free(mem_glob, sizeof(*ref)); | ||
328 | kfree(ref); | ||
329 | write_lock(&tfile->lock); | ||
330 | } | ||
331 | |||
332 | int ttm_ref_object_base_unref(struct ttm_object_file *tfile, | ||
333 | unsigned long key, enum ttm_ref_type ref_type) | ||
334 | { | ||
335 | struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; | ||
336 | struct ttm_ref_object *ref; | ||
337 | struct drm_hash_item *hash; | ||
338 | int ret; | ||
339 | |||
340 | write_lock(&tfile->lock); | ||
341 | ret = drm_ht_find_item(ht, key, &hash); | ||
342 | if (unlikely(ret != 0)) { | ||
343 | write_unlock(&tfile->lock); | ||
344 | return -EINVAL; | ||
345 | } | ||
346 | ref = drm_hash_entry(hash, struct ttm_ref_object, hash); | ||
347 | kref_put(&ref->kref, ttm_ref_object_release); | ||
348 | write_unlock(&tfile->lock); | ||
349 | return 0; | ||
350 | } | ||
351 | EXPORT_SYMBOL(ttm_ref_object_base_unref); | ||
352 | |||
353 | void ttm_object_file_release(struct ttm_object_file **p_tfile) | ||
354 | { | ||
355 | struct ttm_ref_object *ref; | ||
356 | struct list_head *list; | ||
357 | unsigned int i; | ||
358 | struct ttm_object_file *tfile = *p_tfile; | ||
359 | |||
360 | *p_tfile = NULL; | ||
361 | write_lock(&tfile->lock); | ||
362 | |||
363 | /* | ||
364 | * Since we release the lock within the loop, we have to | ||
365 | * restart it from the beginning each time. | ||
366 | */ | ||
367 | |||
368 | while (!list_empty(&tfile->ref_list)) { | ||
369 | list = tfile->ref_list.next; | ||
370 | ref = list_entry(list, struct ttm_ref_object, head); | ||
371 | ttm_ref_object_release(&ref->kref); | ||
372 | } | ||
373 | |||
374 | for (i = 0; i < TTM_REF_NUM; ++i) | ||
375 | drm_ht_remove(&tfile->ref_hash[i]); | ||
376 | |||
377 | write_unlock(&tfile->lock); | ||
378 | ttm_object_file_unref(&tfile); | ||
379 | } | ||
380 | EXPORT_SYMBOL(ttm_object_file_release); | ||
381 | |||
382 | struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev, | ||
383 | unsigned int hash_order) | ||
384 | { | ||
385 | struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL); | ||
386 | unsigned int i; | ||
387 | unsigned int j = 0; | ||
388 | int ret; | ||
389 | |||
390 | if (unlikely(tfile == NULL)) | ||
391 | return NULL; | ||
392 | |||
393 | rwlock_init(&tfile->lock); | ||
394 | tfile->tdev = tdev; | ||
395 | kref_init(&tfile->refcount); | ||
396 | INIT_LIST_HEAD(&tfile->ref_list); | ||
397 | |||
398 | for (i = 0; i < TTM_REF_NUM; ++i) { | ||
399 | ret = drm_ht_create(&tfile->ref_hash[i], hash_order); | ||
400 | if (ret) { | ||
401 | j = i; | ||
402 | goto out_err; | ||
403 | } | ||
404 | } | ||
405 | |||
406 | return tfile; | ||
407 | out_err: | ||
408 | for (i = 0; i < j; ++i) | ||
409 | drm_ht_remove(&tfile->ref_hash[i]); | ||
410 | |||
411 | kfree(tfile); | ||
412 | |||
413 | return NULL; | ||
414 | } | ||
415 | EXPORT_SYMBOL(ttm_object_file_init); | ||
416 | |||
417 | struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global | ||
418 | *mem_glob, | ||
419 | unsigned int hash_order) | ||
420 | { | ||
421 | struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL); | ||
422 | int ret; | ||
423 | |||
424 | if (unlikely(tdev == NULL)) | ||
425 | return NULL; | ||
426 | |||
427 | tdev->mem_glob = mem_glob; | ||
428 | rwlock_init(&tdev->object_lock); | ||
429 | atomic_set(&tdev->object_count, 0); | ||
430 | ret = drm_ht_create(&tdev->object_hash, hash_order); | ||
431 | |||
432 | if (likely(ret == 0)) | ||
433 | return tdev; | ||
434 | |||
435 | kfree(tdev); | ||
436 | return NULL; | ||
437 | } | ||
438 | EXPORT_SYMBOL(ttm_object_device_init); | ||
439 | |||
440 | void ttm_object_device_release(struct ttm_object_device **p_tdev) | ||
441 | { | ||
442 | struct ttm_object_device *tdev = *p_tdev; | ||
443 | |||
444 | *p_tdev = NULL; | ||
445 | |||
446 | write_lock(&tdev->object_lock); | ||
447 | drm_ht_remove(&tdev->object_hash); | ||
448 | write_unlock(&tdev->object_lock); | ||
449 | |||
450 | kfree(tdev); | ||
451 | } | ||
452 | EXPORT_SYMBOL(ttm_object_device_release); | ||
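Putting the new object machinery together, a driver-side lifecycle sketch: everything prefixed "my_" is hypothetical, and the object_type value would be whichever enum ttm_object_type entry the driver uses; only the ttm_* calls and the hash.key handle come from the code above.

#include "ttm/ttm_object.h"
#include <linux/slab.h>

struct my_surface {
	struct ttm_base_object base;
	/* ... driver payload ... */
};

static void my_surface_release(struct ttm_base_object **p_base)
{
	struct my_surface *surf =
	    container_of(*p_base, struct my_surface, base);

	*p_base = NULL;
	kfree(surf);
}

static int my_surface_define(struct ttm_object_file *tfile,
			     enum ttm_object_type type,
			     uint32_t *handle)
{
	struct my_surface *surf = kzalloc(sizeof(*surf), GFP_KERNEL);
	int ret;

	if (surf == NULL)
		return -ENOMEM;

	/*
	 * Hashes the object under a global name and adds a TTM_REF_USAGE
	 * reference for tfile, so ttm_object_file_release() cleans it up
	 * if the client never explicitly unreferences the handle.
	 */
	ret = ttm_base_object_init(tfile, &surf->base, true /* shareable */,
				   type, &my_surface_release, NULL);
	if (ret != 0) {
		kfree(surf);
		return ret;
	}

	*handle = surf->base.hash.key;
	return 0;
}

/* Userspace later drops the handle (or a lookup reference) like this: */
static int my_surface_unref(struct ttm_object_file *tfile, uint32_t handle)
{
	return ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
}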
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 7bcb89f39ce8..9c2b1cc5dba5 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
@@ -192,6 +192,7 @@ int ttm_tt_populate(struct ttm_tt *ttm) | |||
192 | ttm->state = tt_unbound; | 192 | ttm->state = tt_unbound; |
193 | return 0; | 193 | return 0; |
194 | } | 194 | } |
195 | EXPORT_SYMBOL(ttm_tt_populate); | ||
195 | 196 | ||
196 | #ifdef CONFIG_X86 | 197 | #ifdef CONFIG_X86 |
197 | static inline int ttm_tt_set_page_caching(struct page *p, | 198 | static inline int ttm_tt_set_page_caching(struct page *p, |