author     Dave Airlie <airlied@redhat.com>    2010-10-05 22:38:04 -0400
committer  Dave Airlie <airlied@redhat.com>    2010-10-05 22:57:11 -0400
commit     0c8eb0dc65f5a78b252eeff6c729ba4741390e23 (patch)
tree       b3a64b52e9357ca8ea99966d411842659e2a27cf /drivers/gpu/drm/ttm
parent     e6b46ee712b92db1cc2449cf4f65bc635366cad4 (diff)
parent     d961db75ce86a84f1f04e91ad1014653ed7d9f46 (diff)
Merge remote branch 'nouveau/for-airlied' of ../drm-nouveau-next into drm-core-next
[airlied - add fix for vmwgfx build]
* 'nouveau/for-airlied' of ../drm-nouveau-next: (93 commits)
drm/ttm: restructure to allow driver to plug in alternate memory manager
drm/ttm: introduce utility function to free an allocated memory node
drm/nouveau: fix thinkos in mem timing table recordlen check
drm/nouveau: parse voltage from perf 0x40 entries
drm/nouveau: don't use the default pll limits in table v2.1 on nv50+ cards
drm/nv50: Fix large 3D performance regression caused by the interchannel sync patches.
drm/nouveau: Synchronize buffer object moves in hardware.
drm/nouveau: Use semaphores to handle inter-channel sync in hardware.
drm/nouveau: Provide a means to have arbitrary work run on fence completion.
drm/nouveau: Minor refactoring/cleanup of the fence code.
drm/nouveau: Add a module option to force card POST.
drm/nv50: prevent (IB_PUT == IB_GET) from occurring unless idle
drm/nv0x-nv4x: Leave the 0x40 bit untouched when changing CRE_LCD.
drm/nv30-nv40: Fix postdivider mask when writing engine/memory PLLs.
drm/nouveau: Fix perf table parsing on BMP v5.25.
drm/nouveau: fix required mode bandwidth calculation for DP
drm/nouveau: fix typo in c2aa91afea5f7e7ae4530fabd37414a79c03328c
drm/nva3: split pm backend out from nv50
drm/nouveau: run perflvl and M table scripts on mem clock change
drm/nouveau: pass perflvl struct to clock_pre()
...
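
The headline change here, "drm/ttm: restructure to allow driver to plug in alternate memory manager" (first entry above), replaces TTM's hardwired drm_mm usage with a per-memory-type function table that a driver can point at its own allocator. Below is a minimal sketch of that interface, inferred from the (*man->func->...) call sites in the diff that follows; the authoritative definition lives in ttm/ttm_bo_driver.h, which this diffstat filters out, so treat the exact layout as an assumption:

/* Sketch only: reconstructed from the call sites in this commit,
 * not copied from the header (which is outside this diffstat). */
struct ttm_mem_type_manager_func {
	int  (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
	int  (*takedown)(struct ttm_mem_type_manager *man);
	int  (*get_node)(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem);
	void (*put_node)(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem);
	void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
};

get_node() either fills in mem->mm_node and mem->start, or leaves mem->mm_node NULL to signal "no space, try eviction" — which is why the callers in the diff below test mem->mm_node rather than a local node pointer.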
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--   drivers/gpu/drm/ttm/Makefile            3
-rw-r--r--   drivers/gpu/drm/ttm/ttm_agp_backend.c   3
-rw-r--r--   drivers/gpu/drm/ttm/ttm_bo.c          104
-rw-r--r--   drivers/gpu/drm/ttm/ttm_bo_manager.c  148
-rw-r--r--   drivers/gpu/drm/ttm/ttm_bo_util.c      12
5 files changed, 179 insertions, 91 deletions
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b256d4adfafe..f3cf6f02c997 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,6 +4,7 @@
 ccflags-y := -Iinclude/drm
 ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
 	ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
-	ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o
+	ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
+	ttm_bo_manager.o
 
 obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 4bf69c404491..f999e36f30b4 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -74,6 +74,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
 {
 	struct ttm_agp_backend *agp_be =
 		container_of(backend, struct ttm_agp_backend, backend);
+	struct drm_mm_node *node = bo_mem->mm_node;
 	struct agp_memory *mem = agp_be->mem;
 	int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
 	int ret;
@@ -81,7 +82,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
 	mem->is_flushed = 1;
 	mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
 
-	ret = agp_bind_memory(mem, bo_mem->mm_node->start);
+	ret = agp_bind_memory(mem, node->start);
 	if (ret)
 		printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n");
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cb4cf7ef4d1e..af7b57a47fbc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -84,11 +84,8 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
 		man->available_caching);
 	printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
 		man->default_caching);
-	if (mem_type != TTM_PL_SYSTEM) {
-		spin_lock(&bdev->glob->lru_lock);
-		drm_mm_debug_table(&man->manager, TTM_PFX);
-		spin_unlock(&bdev->glob->lru_lock);
-	}
+	if (mem_type != TTM_PL_SYSTEM)
+		(*man->func->debug)(man, TTM_PFX);
 }
 
 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
@@ -421,7 +418,7 @@ moved:
 
 	if (bo->mem.mm_node) {
 		spin_lock(&bo->lock);
-		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
+		bo->offset = (bo->mem.start << PAGE_SHIFT) +
 			bdev->man[bo->mem.mem_type].gpu_offset;
 		bo->cur_placement = bo->mem.placement;
 		spin_unlock(&bo->lock);
@@ -475,11 +472,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 		list_del_init(&bo->ddestroy);
 		++put_count;
 	}
-	if (bo->mem.mm_node) {
-		drm_mm_put_block(bo->mem.mm_node);
-		bo->mem.mm_node = NULL;
-	}
 	spin_unlock(&glob->lru_lock);
+	ttm_bo_mem_put(bo, &bo->mem);
 
 	atomic_set(&bo->reserved, 0);
 
@@ -621,7 +615,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 			bool no_wait_reserve, bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_global *glob = bo->glob;
 	struct ttm_mem_reg evict_mem;
 	struct ttm_placement placement;
 	int ret = 0;
@@ -667,12 +660,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	if (ret) {
 		if (ret != -ERESTARTSYS)
 			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
-		spin_lock(&glob->lru_lock);
-		if (evict_mem.mm_node) {
-			drm_mm_put_block(evict_mem.mm_node);
-			evict_mem.mm_node = NULL;
-		}
-		spin_unlock(&glob->lru_lock);
+		ttm_bo_mem_put(bo, &evict_mem);
 		goto out;
 	}
 	bo->evicted = true;
@@ -733,41 +721,14 @@ retry:
 	return ret;
 }
 
-static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
-			struct ttm_mem_type_manager *man,
-			struct ttm_placement *placement,
-			struct ttm_mem_reg *mem,
-			struct drm_mm_node **node)
+void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
 {
-	struct ttm_bo_global *glob = bo->glob;
-	unsigned long lpfn;
-	int ret;
+	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
 
-	lpfn = placement->lpfn;
-	if (!lpfn)
-		lpfn = man->size;
-	*node = NULL;
-	do {
-		ret = drm_mm_pre_get(&man->manager);
-		if (unlikely(ret))
-			return ret;
-
-		spin_lock(&glob->lru_lock);
-		*node = drm_mm_search_free_in_range(&man->manager,
-				mem->num_pages, mem->page_alignment,
-				placement->fpfn, lpfn, 1);
-		if (unlikely(*node == NULL)) {
-			spin_unlock(&glob->lru_lock);
-			return 0;
-		}
-		*node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
-							mem->page_alignment,
-							placement->fpfn,
-							lpfn);
-		spin_unlock(&glob->lru_lock);
-	} while (*node == NULL);
-	return 0;
+	if (mem->mm_node)
+		(*man->func->put_node)(man, mem);
 }
+EXPORT_SYMBOL(ttm_bo_mem_put);
 
 /**
  * Repeatedly evict memory from the LRU for @mem_type until we create enough
@@ -784,14 +745,13 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-	struct drm_mm_node *node;
 	int ret;
 
 	do {
-		ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
+		ret = (*man->func->get_node)(man, bo, placement, mem);
 		if (unlikely(ret != 0))
 			return ret;
-		if (node)
+		if (mem->mm_node)
 			break;
 		spin_lock(&glob->lru_lock);
 		if (list_empty(&man->lru)) {
@@ -804,9 +764,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 		if (unlikely(ret != 0))
 			return ret;
 	} while (1);
-	if (node == NULL)
+	if (mem->mm_node == NULL)
 		return -ENOMEM;
-	mem->mm_node = node;
 	mem->mem_type = mem_type;
 	return 0;
 }
@@ -880,7 +839,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	bool type_found = false;
 	bool type_ok = false;
 	bool has_erestartsys = false;
-	struct drm_mm_node *node = NULL;
 	int i, ret;
 
 	mem->mm_node = NULL;
@@ -914,17 +872,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
 		if (man->has_type && man->use_type) {
 			type_found = true;
-			ret = ttm_bo_man_get_node(bo, man, placement, mem,
-							&node);
+			ret = (*man->func->get_node)(man, bo, placement, mem);
 			if (unlikely(ret))
 				return ret;
 		}
-		if (node)
+		if (mem->mm_node)
 			break;
 	}
 
-	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
-		mem->mm_node = node;
+	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
 		mem->mem_type = mem_type;
 		mem->placement = cur_flags;
 		return 0;
@@ -994,7 +950,6 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 			bool interruptible, bool no_wait_reserve,
 			bool no_wait_gpu)
 {
-	struct ttm_bo_global *glob = bo->glob;
 	int ret = 0;
 	struct ttm_mem_reg mem;
 
@@ -1022,11 +977,8 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 		goto out_unlock;
 	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
 out_unlock:
-	if (ret && mem.mm_node) {
-		spin_lock(&glob->lru_lock);
-		drm_mm_put_block(mem.mm_node);
-		spin_unlock(&glob->lru_lock);
-	}
+	if (ret && mem.mm_node)
+		ttm_bo_mem_put(bo, &mem);
 	return ret;
 }
 
@@ -1034,11 +986,10 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
 			     struct ttm_mem_reg *mem)
 {
 	int i;
-	struct drm_mm_node *node = mem->mm_node;
 
-	if (node && placement->lpfn != 0 &&
-	    (node->start < placement->fpfn ||
-	     node->start + node->size > placement->lpfn))
+	if (mem->mm_node && placement->lpfn != 0 &&
+	    (mem->start < placement->fpfn ||
+	     mem->start + mem->num_pages > placement->lpfn))
 		return -1;
 
 	for (i = 0; i < placement->num_placement; i++) {
@@ -1282,7 +1233,6 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 
 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 {
-	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man;
 	int ret = -EINVAL;
 
@@ -1305,13 +1255,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 	if (mem_type > 0) {
 		ttm_bo_force_list_clean(bdev, mem_type, false);
 
-		spin_lock(&glob->lru_lock);
-		if (drm_mm_clean(&man->manager))
-			drm_mm_takedown(&man->manager);
-		else
-			ret = -EBUSY;
-
-		spin_unlock(&glob->lru_lock);
+		ret = (*man->func->takedown)(man);
 	}
 
 	return ret;
@@ -1362,6 +1306,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 	ret = bdev->driver->init_mem_type(bdev, type, man);
 	if (ret)
 		return ret;
+	man->bdev = bdev;
 
 	ret = 0;
 	if (type != TTM_PL_SYSTEM) {
@@ -1371,7 +1316,8 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 			       type);
 			return ret;
 		}
-		ret = drm_mm_init(&man->manager, 0, p_size);
+
+		ret = (*man->func->init)(man, p_size);
 		if (ret)
 			return ret;
 	}
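
Besides man->func, the hunks above rely on two further ttm_mem_type_manager members: man->bdev (set in ttm_bo_init_mm() so manager callbacks can reach the device) and man->priv (private state for the plugged-in manager, used by the new file below). The header change adding them is outside this diffstat, so the following fragment is a hedged sketch of just those fields, not the real declaration:

struct ttm_mem_type_manager {
	/* ... existing members (has_type, use_type, size, gpu_offset, lru, ...) ... */
	struct ttm_bo_device *bdev;			/* back-pointer, set in ttm_bo_init_mm() */
	const struct ttm_mem_type_manager_func *func;	/* pluggable allocator ops */
	void *priv;					/* manager-private state, e.g. a struct drm_mm */
};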
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
new file mode 100644
index 000000000000..7410c190c891
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -0,0 +1,148 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "ttm/ttm_module.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/module.h>
+
+static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
+			       struct ttm_buffer_object *bo,
+			       struct ttm_placement *placement,
+			       struct ttm_mem_reg *mem)
+{
+	struct ttm_bo_global *glob = man->bdev->glob;
+	struct drm_mm *mm = man->priv;
+	struct drm_mm_node *node = NULL;
+	unsigned long lpfn;
+	int ret;
+
+	lpfn = placement->lpfn;
+	if (!lpfn)
+		lpfn = man->size;
+	do {
+		ret = drm_mm_pre_get(mm);
+		if (unlikely(ret))
+			return ret;
+
+		spin_lock(&glob->lru_lock);
+		node = drm_mm_search_free_in_range(mm,
+				mem->num_pages, mem->page_alignment,
+				placement->fpfn, lpfn, 1);
+		if (unlikely(node == NULL)) {
+			spin_unlock(&glob->lru_lock);
+			return 0;
+		}
+		node = drm_mm_get_block_atomic_range(node, mem->num_pages,
+						     mem->page_alignment,
+						     placement->fpfn,
+						     lpfn);
+		spin_unlock(&glob->lru_lock);
+	} while (node == NULL);
+
+	mem->mm_node = node;
+	mem->start = node->start;
+	return 0;
+}
+
+static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
+				struct ttm_mem_reg *mem)
+{
+	struct ttm_bo_global *glob = man->bdev->glob;
+
+	if (mem->mm_node) {
+		spin_lock(&glob->lru_lock);
+		drm_mm_put_block(mem->mm_node);
+		spin_unlock(&glob->lru_lock);
+		mem->mm_node = NULL;
+	}
+}
+
+static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
+			   unsigned long p_size)
+{
+	struct drm_mm *mm;
+	int ret;
+
+	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+	if (!mm)
+		return -ENOMEM;
+
+	ret = drm_mm_init(mm, 0, p_size);
+	if (ret) {
+		kfree(mm);
+		return ret;
+	}
+
+	man->priv = mm;
+	return 0;
+}
+
+static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
+{
+	struct ttm_bo_global *glob = man->bdev->glob;
+	struct drm_mm *mm = man->priv;
+	int ret = 0;
+
+	spin_lock(&glob->lru_lock);
+	if (drm_mm_clean(mm)) {
+		drm_mm_takedown(mm);
+		kfree(mm);
+		man->priv = NULL;
+	} else
+		ret = -EBUSY;
+	spin_unlock(&glob->lru_lock);
+	return ret;
+}
+
+static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
+			     const char *prefix)
+{
+	struct ttm_bo_global *glob = man->bdev->glob;
+	struct drm_mm *mm = man->priv;
+
+	spin_lock(&glob->lru_lock);
+	drm_mm_debug_table(mm, prefix);
+	spin_unlock(&glob->lru_lock);
+}
+
+const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
+	ttm_bo_man_init,
+	ttm_bo_man_takedown,
+	ttm_bo_man_get_node,
+	ttm_bo_man_put_node,
+	ttm_bo_man_debug
+};
+EXPORT_SYMBOL(ttm_bo_manager_func);
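
A driver that is content with this stock drm_mm-backed range manager opts in from its init_mem_type() hook by pointing man->func at the exported table. A hypothetical sketch follows; the function name and the flag choices are illustrative, not taken from this series:

/* Hypothetical driver callback: only the man->func assignment is the
 * pattern ttm_bo_manager.c enables, the rest is example scaffolding. */
static int example_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				 struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_VRAM:
		/* plug in the default range manager from ttm_bo_manager.c */
		man->func = &ttm_bo_manager_func;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

ttm_bo_init_mm() then calls (*man->func->init)(man, p_size), so the drm_mm lives behind man->priv instead of being embedded in TTM core, and a driver can just as well substitute a ttm_mem_type_manager_func of its own — which is what the nouveau work in this merge builds on.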
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 3451a82adba7..ff358ad45aa3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -39,14 +39,7 @@
 
 void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 {
-	struct ttm_mem_reg *old_mem = &bo->mem;
-
-	if (old_mem->mm_node) {
-		spin_lock(&bo->glob->lru_lock);
-		drm_mm_put_block(old_mem->mm_node);
-		spin_unlock(&bo->glob->lru_lock);
-	}
-	old_mem->mm_node = NULL;
+	ttm_bo_mem_put(bo, &bo->mem);
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
@@ -263,8 +256,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 		dir = 1;
 
 	if ((old_mem->mem_type == new_mem->mem_type) &&
-	    (new_mem->mm_node->start <
-	     old_mem->mm_node->start + old_mem->mm_node->size)) {
+	    (new_mem->start < old_mem->start + old_mem->size)) {
 		dir = -1;
 		add = new_mem->num_pages - 1;
 	}
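
The ttm_bo_move_memcpy() hunk above keeps the overlap handling while switching from mm_node->start/size to mem->start/size: when source and destination of a same-memory-type move overlap, the copy walks backwards so no page is overwritten before it has been read. Here is a standalone, plain-C illustration of that memmove-style rule — simplified, with made-up names, and the kernel's actual test is slightly broader than the one shown:

#include <stdio.h>

/* Copy n "pages" from index src to index dst within one array, picking
 * the direction the way the hunk above does for overlapping moves. */
static void move_pages(int *pages, long src, long dst, long n)
{
	long i, dir = 1, add = 0;

	if (dst > src && dst < src + n) {	/* dst overlaps src's tail */
		dir = -1;			/* copy the last page first */
		add = n - 1;
	}
	for (i = 0; i < n; ++i) {
		long p = add + i * dir;
		pages[dst + p] = pages[src + p];
	}
}

int main(void)
{
	int buf[8] = {1, 2, 3, 4, 5, 6, 7, 8};

	move_pages(buf, 0, 2, 4);	/* shift pages 0..3 up to 2..5 */
	for (long i = 0; i < 8; i++)
		printf("%d ", buf[i]);	/* prints: 1 2 1 2 3 4 7 8 */
	printf("\n");
	return 0;
}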