Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo_manager.c')

 -rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_manager.c | 81
 1 file changed, 45 insertions(+), 36 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 7410c190c891..038e947d00f9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,20 +31,29 @@
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_placement.h"
-#include <linux/jiffies.h>
+#include "drm_mm.h"
 #include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/file.h>
+#include <linux/spinlock.h>
 #include <linux/module.h>
 
+/**
+ * Currently we use a spinlock for the lock, but a mutex *may* be
+ * more appropriate to reduce scheduling latency if the range manager
+ * ends up with very fragmented allocation patterns.
+ */
+
+struct ttm_range_manager {
+	struct drm_mm mm;
+	spinlock_t lock;
+};
+
 static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 			       struct ttm_buffer_object *bo,
 			       struct ttm_placement *placement,
 			       struct ttm_mem_reg *mem)
 {
-	struct ttm_bo_global *glob = man->bdev->glob;
-	struct drm_mm *mm = man->priv;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+	struct drm_mm *mm = &rman->mm;
 	struct drm_mm_node *node = NULL;
 	unsigned long lpfn;
 	int ret;
@@ -57,19 +66,19 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 		if (unlikely(ret))
 			return ret;
 
-		spin_lock(&glob->lru_lock);
+		spin_lock(&rman->lock);
 		node = drm_mm_search_free_in_range(mm,
 					mem->num_pages, mem->page_alignment,
 					placement->fpfn, lpfn, 1);
 		if (unlikely(node == NULL)) {
-			spin_unlock(&glob->lru_lock);
+			spin_unlock(&rman->lock);
 			return 0;
 		}
 		node = drm_mm_get_block_atomic_range(node, mem->num_pages,
 						     mem->page_alignment,
 						     placement->fpfn,
 						     lpfn);
-		spin_unlock(&glob->lru_lock);
+		spin_unlock(&rman->lock);
 	} while (node == NULL);
 
 	mem->mm_node = node;
@@ -80,12 +89,12 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
 				struct ttm_mem_reg *mem)
 {
-	struct ttm_bo_global *glob = man->bdev->glob;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 
 	if (mem->mm_node) {
-		spin_lock(&glob->lru_lock);
+		spin_lock(&rman->lock);
 		drm_mm_put_block(mem->mm_node);
-		spin_unlock(&glob->lru_lock);
+		spin_unlock(&rman->lock);
 		mem->mm_node = NULL;
 	}
 }
@@ -93,49 +102,49 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
 static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
 			   unsigned long p_size)
 {
-	struct drm_mm *mm;
+	struct ttm_range_manager *rman;
 	int ret;
 
-	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
-	if (!mm)
+	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
+	if (!rman)
 		return -ENOMEM;
 
-	ret = drm_mm_init(mm, 0, p_size);
+	ret = drm_mm_init(&rman->mm, 0, p_size);
 	if (ret) {
-		kfree(mm);
+		kfree(rman);
 		return ret;
 	}
 
-	man->priv = mm;
+	spin_lock_init(&rman->lock);
+	man->priv = rman;
 	return 0;
 }
 
 static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
 {
-	struct ttm_bo_global *glob = man->bdev->glob;
-	struct drm_mm *mm = man->priv;
-	int ret = 0;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+	struct drm_mm *mm = &rman->mm;
 
-	spin_lock(&glob->lru_lock);
+	spin_lock(&rman->lock);
 	if (drm_mm_clean(mm)) {
 		drm_mm_takedown(mm);
-		kfree(mm);
+		spin_unlock(&rman->lock);
+		kfree(rman);
 		man->priv = NULL;
-	} else
-		ret = -EBUSY;
-	spin_unlock(&glob->lru_lock);
-	return ret;
+		return 0;
+	}
+	spin_unlock(&rman->lock);
+	return -EBUSY;
 }
 
 static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
 			     const char *prefix)
 {
-	struct ttm_bo_global *glob = man->bdev->glob;
-	struct drm_mm *mm = man->priv;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 
-	spin_lock(&glob->lru_lock);
-	drm_mm_debug_table(mm, prefix);
-	spin_unlock(&glob->lru_lock);
+	spin_lock(&rman->lock);
+	drm_mm_debug_table(&rman->mm, prefix);
+	spin_unlock(&rman->lock);
 }
 
 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
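For context, a minimal sketch of how a driver of this kernel generation would plug the exported ttm_bo_manager_func into one of its memory types. The foo_ prefix, the flag choices, and the callback wiring are illustrative assumptions, not part of this commit; only ttm_bo_manager_func and the ttm_mem_type_manager fields come from the TTM API of this era.

/*
 * Hypothetical driver callback: select the generic drm_mm-backed
 * range manager for VRAM. Invoked by the TTM core when the driver
 * sets up a memory type.
 */
static int foo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			     struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_VRAM:
		/* Back VRAM with the range manager defined above. */
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

With the func table in place, the TTM core of this period routes per-type setup through man->func->init(), landing in ttm_bo_man_init() above; get_node/put_node then serialize on the private rman->lock rather than the global LRU lock, which is what this patch changes.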