aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/drm_mm.c
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2008-05-28 20:09:59 -0400
committerDave Airlie <airlied@redhat.com>2008-07-13 20:45:01 -0400
commitc0e09200dc0813972442e550a5905a132768e56c (patch)
treed38e635a30ff8b0a2b98b9d7f97cab1501f8209e /drivers/gpu/drm/drm_mm.c
parentbce7f793daec3e65ec5c5705d2457b81fe7b5725 (diff)
drm: reorganise drm tree to be more future proof.
With the coming of kernel based modesetting and the memory manager stuff, the everything in one directory approach was getting very ugly and starting to be unmanageable. This restructures the drm along the lines of other kernel components. It creates a drivers/gpu/drm directory and moves the hw drivers into subdirectores. It moves the includes into an include/drm, and sets up the unifdef for the userspace headers we should be exporting. Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/drm_mm.c')
-rw-r--r--drivers/gpu/drm/drm_mm.c295
1 files changed, 295 insertions, 0 deletions
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
new file mode 100644
index 000000000000..dcff9e9b52e3
--- /dev/null
+++ b/drivers/gpu/drm/drm_mm.c
@@ -0,0 +1,295 @@
1/**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 *
27 **************************************************************************/
28
29/*
30 * Generic simple memory manager implementation. Intended to be used as a base
31 * class implementation for more advanced memory managers.
32 *
33 * Note that the algorithm used is quite simple and there might be substantial
34 * performance gains if a smarter free list is implemented. Currently it is just an
35 * unordered stack of free regions. This could easily be improved if an RB-tree
36 * is used instead. At least if we expect heavy fragmentation.
37 *
38 * Aligned allocations can also see improvement.
39 *
40 * Authors:
41 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
42 */
43
44#include "drmP.h"
45#include <linux/slab.h>
46
47unsigned long drm_mm_tail_space(struct drm_mm *mm)
48{
49 struct list_head *tail_node;
50 struct drm_mm_node *entry;
51
52 tail_node = mm->ml_entry.prev;
53 entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
54 if (!entry->free)
55 return 0;
56
57 return entry->size;
58}
59
60int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
61{
62 struct list_head *tail_node;
63 struct drm_mm_node *entry;
64
65 tail_node = mm->ml_entry.prev;
66 entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
67 if (!entry->free)
68 return -ENOMEM;
69
70 if (entry->size <= size)
71 return -ENOMEM;
72
73 entry->size -= size;
74 return 0;
75}
76
77
78static int drm_mm_create_tail_node(struct drm_mm *mm,
79 unsigned long start,
80 unsigned long size)
81{
82 struct drm_mm_node *child;
83
84 child = (struct drm_mm_node *)
85 drm_alloc(sizeof(*child), DRM_MEM_MM);
86 if (!child)
87 return -ENOMEM;
88
89 child->free = 1;
90 child->size = size;
91 child->start = start;
92 child->mm = mm;
93
94 list_add_tail(&child->ml_entry, &mm->ml_entry);
95 list_add_tail(&child->fl_entry, &mm->fl_entry);
96
97 return 0;
98}
99
100
101int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
102{
103 struct list_head *tail_node;
104 struct drm_mm_node *entry;
105
106 tail_node = mm->ml_entry.prev;
107 entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
108 if (!entry->free) {
109 return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
110 }
111 entry->size += size;
112 return 0;
113}
114
115static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
116 unsigned long size)
117{
118 struct drm_mm_node *child;
119
120 child = (struct drm_mm_node *)
121 drm_alloc(sizeof(*child), DRM_MEM_MM);
122 if (!child)
123 return NULL;
124
125 INIT_LIST_HEAD(&child->fl_entry);
126
127 child->free = 0;
128 child->size = size;
129 child->start = parent->start;
130 child->mm = parent->mm;
131
132 list_add_tail(&child->ml_entry, &parent->ml_entry);
133 INIT_LIST_HEAD(&child->fl_entry);
134
135 parent->size -= size;
136 parent->start += size;
137 return child;
138}
139
140
141
142struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
143 unsigned long size, unsigned alignment)
144{
145
146 struct drm_mm_node *align_splitoff = NULL;
147 struct drm_mm_node *child;
148 unsigned tmp = 0;
149
150 if (alignment)
151 tmp = parent->start % alignment;
152
153 if (tmp) {
154 align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
155 if (!align_splitoff)
156 return NULL;
157 }
158
159 if (parent->size == size) {
160 list_del_init(&parent->fl_entry);
161 parent->free = 0;
162 return parent;
163 } else {
164 child = drm_mm_split_at_start(parent, size);
165 }
166
167 if (align_splitoff)
168 drm_mm_put_block(align_splitoff);
169
170 return child;
171}
172
/*
 * Put a block. Merge with the previous and / or next block if they are free.
 * Otherwise add to the free stack.
 */

void drm_mm_put_block(struct drm_mm_node * cur)
{

	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &mm->ml_entry;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	/* Set once cur's range has been absorbed into a neighbour. */
	int merged = 0;

	/* If a predecessor exists and is free, grow it over cur's range. */
	if (cur_head->prev != root_head) {
		prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	/* If a successor exists and is free, merge with it too. */
	if (cur_head->next != root_head) {
		next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry);
		if (next_node->free) {
			if (merged) {
				/*
				 * Both neighbours free: prev absorbs next as
				 * well, and next's node is released entirely.
				 */
				prev_node->size += next_node->size;
				list_del(&next_node->ml_entry);
				list_del(&next_node->fl_entry);
				drm_free(next_node, sizeof(*next_node),
					 DRM_MEM_MM);
			} else {
				/* Only next free: extend it backwards over cur. */
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		/* No free neighbour: cur itself becomes a free-stack entry. */
		cur->free = 1;
		list_add(&cur->fl_entry, &mm->fl_entry);
	} else {
		/* cur's range now lives in a neighbour; drop the node. */
		list_del(&cur->ml_entry);
		drm_free(cur, sizeof(*cur), DRM_MEM_MM);
	}
}
220
221struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
222 unsigned long size,
223 unsigned alignment, int best_match)
224{
225 struct list_head *list;
226 const struct list_head *free_stack = &mm->fl_entry;
227 struct drm_mm_node *entry;
228 struct drm_mm_node *best;
229 unsigned long best_size;
230 unsigned wasted;
231
232 best = NULL;
233 best_size = ~0UL;
234
235 list_for_each(list, free_stack) {
236 entry = list_entry(list, struct drm_mm_node, fl_entry);
237 wasted = 0;
238
239 if (entry->size < size)
240 continue;
241
242 if (alignment) {
243 register unsigned tmp = entry->start % alignment;
244 if (tmp)
245 wasted += alignment - tmp;
246 }
247
248
249 if (entry->size >= size + wasted) {
250 if (!best_match)
251 return entry;
252 if (size < best_size) {
253 best = entry;
254 best_size = entry->size;
255 }
256 }
257 }
258
259 return best;
260}
261
262int drm_mm_clean(struct drm_mm * mm)
263{
264 struct list_head *head = &mm->ml_entry;
265
266 return (head->next->next == head);
267}
268
269int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
270{
271 INIT_LIST_HEAD(&mm->ml_entry);
272 INIT_LIST_HEAD(&mm->fl_entry);
273
274 return drm_mm_create_tail_node(mm, start, size);
275}
276
277
278void drm_mm_takedown(struct drm_mm * mm)
279{
280 struct list_head *bnode = mm->fl_entry.next;
281 struct drm_mm_node *entry;
282
283 entry = list_entry(bnode, struct drm_mm_node, fl_entry);
284
285 if (entry->ml_entry.next != &mm->ml_entry ||
286 entry->fl_entry.next != &mm->fl_entry) {
287 DRM_ERROR("Memory manager not clean. Delaying takedown\n");
288 return;
289 }
290
291 list_del(&entry->fl_entry);
292 list_del(&entry->ml_entry);
293
294 drm_free(entry, sizeof(*entry), DRM_MEM_MM);
295}