author     Dave Airlie <airlied@redhat.com>   2011-12-22 14:05:01 -0500
committer  Dave Airlie <airlied@redhat.com>   2011-12-22 14:05:01 -0500
commit     5c72765ed0aa5f70f53d8dfd6f927ef7619dd698 (patch)
tree       8a0a2fa5578a61e4a25acb0b0d7ca8722bdb6319
parent     5d56fe5fd794a98c4f446f8665fd06b82e93ff64 (diff)
parent     40c34d042dc2f8954d5f619f6348a8659a4b416f (diff)
Merge branch 'for-airlied' of git://people.freedesktop.org/~danvet/drm into drm-core-next
* 'for-airlied' of git://people.freedesktop.org/~danvet/drm:
drm/i810: don't acces hw regs in lastclose
drm/i810: cleanup reclaim_buffers
drm: kill drm_sman
drm/sis: use drm_mm instead of drm_sman
drm/via: use drm_mm instead of drm_sman
drm/sman: kill user_hash_tab
drm/sis: track user->memblock mapping with idr
drm/via: track user->memblock mapping with idr
drm/sman: rip out owner tracking
drm/sman: kill owner tracking interface functions
drm/via: track obj->drm_fd relations in the driver
drm/sis: track obj->drm_fd relations in the driver
-rw-r--r--  drivers/gpu/drm/Makefile         |   2
-rw-r--r--  drivers/gpu/drm/drm_sman.c       | 351
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c  |  19
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.c  |   1
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.h  |   6
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.c    |  33
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.h    |   7
-rw-r--r--  drivers/gpu/drm/sis/sis_mm.c     | 196
-rw-r--r--  drivers/gpu/drm/via/via_drv.c    |  25
-rw-r--r--  drivers/gpu/drm/via/via_drv.h    |   7
-rw-r--r--  drivers/gpu/drm/via/via_map.c    |  10
-rw-r--r--  drivers/gpu/drm/via/via_mm.c     | 132
-rw-r--r--  include/drm/drm_sman.h           | 176
-rw-r--r--  include/drm/sis_drm.h            |   4
-rw-r--r--  include/drm/via_drm.h            |   4
15 files changed, 289 insertions(+), 684 deletions(-)
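
The series below replaces the drm_sman "simple memory manager" with direct use of drm_mm plus per-open-file bookkeeping in the sis and via drivers. As a rough, standalone illustration of that ownership model, here is a minimal user-space C sketch: it is not the kernel code, the toy singly linked list and the names are made up for the example, and the real drivers use struct drm_mm_node, an idr and list_for_each_entry_safe() instead.

	/* Toy model: each open file keeps its own list of allocations,
	 * and closing the file (or a crashed client) frees everything
	 * still on that list. */
	#include <stdio.h>
	#include <stdlib.h>

	struct memblock {
		unsigned long offset;
		struct memblock *next;	/* per-owner list, like obj_list in the patches */
	};

	struct file_private {
		struct memblock *obj_list;
	};

	static struct memblock *alloc_block(struct file_private *priv, unsigned long offset)
	{
		struct memblock *item = calloc(1, sizeof(*item));

		if (!item)
			return NULL;
		item->offset = offset;
		item->next = priv->obj_list;	/* track the allocation per owner */
		priv->obj_list = item;
		return item;
	}

	static void reclaim_buffers(struct file_private *priv)
	{
		struct memblock *entry = priv->obj_list;

		while (entry) {			/* free whatever the client left behind */
			struct memblock *next = entry->next;

			printf("reclaiming block at 0x%lx\n", entry->offset);
			free(entry);
			entry = next;
		}
		priv->obj_list = NULL;
	}

	int main(void)
	{
		struct file_private priv = { .obj_list = NULL };

		alloc_block(&priv, 0x1000);
		alloc_block(&priv, 0x2000);
		reclaim_buffers(&priv);	/* what postclose/reclaim does per drm_file */
		return 0;
	}
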
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 6307486b1637..0cde1b80fdb1 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -9,7 +9,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
 		drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
 		drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
 		drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
-		drm_platform.o drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
+		drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
 		drm_crtc.o drm_modes.o drm_edid.o \
 		drm_info.o drm_debugfs.o drm_encoder_slave.o \
 		drm_trace_points.o drm_global.o drm_usb.o
diff --git a/drivers/gpu/drm/drm_sman.c b/drivers/gpu/drm/drm_sman.c
deleted file mode 100644
index cebce45f4429..000000000000
--- a/drivers/gpu/drm/drm_sman.c
+++ /dev/null
@@ -1,351 +0,0 @@
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA. | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
18 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
19 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
20 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * The above copyright notice and this permission notice (including the | ||
23 | * next paragraph) shall be included in all copies or substantial portions | ||
24 | * of the Software. | ||
25 | * | ||
26 | * | ||
27 | **************************************************************************/ | ||
28 | /* | ||
29 | * Simple memory manager interface that keeps track on allocate regions on a | ||
30 | * per "owner" basis. All regions associated with an "owner" can be released | ||
31 | * with a simple call. Typically if the "owner" exists. The owner is any | ||
32 | * "unsigned long" identifier. Can typically be a pointer to a file private | ||
33 | * struct or a context identifier. | ||
34 | * | ||
35 | * Authors: | ||
36 | * Thomas Hellström <thomas-at-tungstengraphics-dot-com> | ||
37 | */ | ||
38 | |||
39 | #include <linux/export.h> | ||
40 | #include "drm_sman.h" | ||
41 | |||
42 | struct drm_owner_item { | ||
43 | struct drm_hash_item owner_hash; | ||
44 | struct list_head sman_list; | ||
45 | struct list_head mem_blocks; | ||
46 | }; | ||
47 | |||
48 | void drm_sman_takedown(struct drm_sman * sman) | ||
49 | { | ||
50 | drm_ht_remove(&sman->user_hash_tab); | ||
51 | drm_ht_remove(&sman->owner_hash_tab); | ||
52 | kfree(sman->mm); | ||
53 | } | ||
54 | |||
55 | EXPORT_SYMBOL(drm_sman_takedown); | ||
56 | |||
57 | int | ||
58 | drm_sman_init(struct drm_sman * sman, unsigned int num_managers, | ||
59 | unsigned int user_order, unsigned int owner_order) | ||
60 | { | ||
61 | int ret = 0; | ||
62 | |||
63 | sman->mm = kcalloc(num_managers, sizeof(*sman->mm), GFP_KERNEL); | ||
64 | if (!sman->mm) { | ||
65 | ret = -ENOMEM; | ||
66 | goto out; | ||
67 | } | ||
68 | sman->num_managers = num_managers; | ||
69 | INIT_LIST_HEAD(&sman->owner_items); | ||
70 | ret = drm_ht_create(&sman->owner_hash_tab, owner_order); | ||
71 | if (ret) | ||
72 | goto out1; | ||
73 | ret = drm_ht_create(&sman->user_hash_tab, user_order); | ||
74 | if (!ret) | ||
75 | goto out; | ||
76 | |||
77 | drm_ht_remove(&sman->owner_hash_tab); | ||
78 | out1: | ||
79 | kfree(sman->mm); | ||
80 | out: | ||
81 | return ret; | ||
82 | } | ||
83 | |||
84 | EXPORT_SYMBOL(drm_sman_init); | ||
85 | |||
86 | static void *drm_sman_mm_allocate(void *private, unsigned long size, | ||
87 | unsigned alignment) | ||
88 | { | ||
89 | struct drm_mm *mm = (struct drm_mm *) private; | ||
90 | struct drm_mm_node *tmp; | ||
91 | |||
92 | tmp = drm_mm_search_free(mm, size, alignment, 1); | ||
93 | if (!tmp) { | ||
94 | return NULL; | ||
95 | } | ||
96 | tmp = drm_mm_get_block(tmp, size, alignment); | ||
97 | return tmp; | ||
98 | } | ||
99 | |||
100 | static void drm_sman_mm_free(void *private, void *ref) | ||
101 | { | ||
102 | struct drm_mm_node *node = (struct drm_mm_node *) ref; | ||
103 | |||
104 | drm_mm_put_block(node); | ||
105 | } | ||
106 | |||
107 | static void drm_sman_mm_destroy(void *private) | ||
108 | { | ||
109 | struct drm_mm *mm = (struct drm_mm *) private; | ||
110 | drm_mm_takedown(mm); | ||
111 | kfree(mm); | ||
112 | } | ||
113 | |||
114 | static unsigned long drm_sman_mm_offset(void *private, void *ref) | ||
115 | { | ||
116 | struct drm_mm_node *node = (struct drm_mm_node *) ref; | ||
117 | return node->start; | ||
118 | } | ||
119 | |||
120 | int | ||
121 | drm_sman_set_range(struct drm_sman * sman, unsigned int manager, | ||
122 | unsigned long start, unsigned long size) | ||
123 | { | ||
124 | struct drm_sman_mm *sman_mm; | ||
125 | struct drm_mm *mm; | ||
126 | int ret; | ||
127 | |||
128 | BUG_ON(manager >= sman->num_managers); | ||
129 | |||
130 | sman_mm = &sman->mm[manager]; | ||
131 | mm = kzalloc(sizeof(*mm), GFP_KERNEL); | ||
132 | if (!mm) { | ||
133 | return -ENOMEM; | ||
134 | } | ||
135 | sman_mm->private = mm; | ||
136 | ret = drm_mm_init(mm, start, size); | ||
137 | |||
138 | if (ret) { | ||
139 | kfree(mm); | ||
140 | return ret; | ||
141 | } | ||
142 | |||
143 | sman_mm->allocate = drm_sman_mm_allocate; | ||
144 | sman_mm->free = drm_sman_mm_free; | ||
145 | sman_mm->destroy = drm_sman_mm_destroy; | ||
146 | sman_mm->offset = drm_sman_mm_offset; | ||
147 | |||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | EXPORT_SYMBOL(drm_sman_set_range); | ||
152 | |||
153 | int | ||
154 | drm_sman_set_manager(struct drm_sman * sman, unsigned int manager, | ||
155 | struct drm_sman_mm * allocator) | ||
156 | { | ||
157 | BUG_ON(manager >= sman->num_managers); | ||
158 | sman->mm[manager] = *allocator; | ||
159 | |||
160 | return 0; | ||
161 | } | ||
162 | EXPORT_SYMBOL(drm_sman_set_manager); | ||
163 | |||
164 | static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman, | ||
165 | unsigned long owner) | ||
166 | { | ||
167 | int ret; | ||
168 | struct drm_hash_item *owner_hash_item; | ||
169 | struct drm_owner_item *owner_item; | ||
170 | |||
171 | ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item); | ||
172 | if (!ret) { | ||
173 | return drm_hash_entry(owner_hash_item, struct drm_owner_item, | ||
174 | owner_hash); | ||
175 | } | ||
176 | |||
177 | owner_item = kzalloc(sizeof(*owner_item), GFP_KERNEL); | ||
178 | if (!owner_item) | ||
179 | goto out; | ||
180 | |||
181 | INIT_LIST_HEAD(&owner_item->mem_blocks); | ||
182 | owner_item->owner_hash.key = owner; | ||
183 | if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash)) | ||
184 | goto out1; | ||
185 | |||
186 | list_add_tail(&owner_item->sman_list, &sman->owner_items); | ||
187 | return owner_item; | ||
188 | |||
189 | out1: | ||
190 | kfree(owner_item); | ||
191 | out: | ||
192 | return NULL; | ||
193 | } | ||
194 | |||
195 | struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager, | ||
196 | unsigned long size, unsigned alignment, | ||
197 | unsigned long owner) | ||
198 | { | ||
199 | void *tmp; | ||
200 | struct drm_sman_mm *sman_mm; | ||
201 | struct drm_owner_item *owner_item; | ||
202 | struct drm_memblock_item *memblock; | ||
203 | |||
204 | BUG_ON(manager >= sman->num_managers); | ||
205 | |||
206 | sman_mm = &sman->mm[manager]; | ||
207 | tmp = sman_mm->allocate(sman_mm->private, size, alignment); | ||
208 | |||
209 | if (!tmp) { | ||
210 | return NULL; | ||
211 | } | ||
212 | |||
213 | memblock = kzalloc(sizeof(*memblock), GFP_KERNEL); | ||
214 | |||
215 | if (!memblock) | ||
216 | goto out; | ||
217 | |||
218 | memblock->mm_info = tmp; | ||
219 | memblock->mm = sman_mm; | ||
220 | memblock->sman = sman; | ||
221 | |||
222 | if (drm_ht_just_insert_please | ||
223 | (&sman->user_hash_tab, &memblock->user_hash, | ||
224 | (unsigned long)memblock, 32, 0, 0)) | ||
225 | goto out1; | ||
226 | |||
227 | owner_item = drm_sman_get_owner_item(sman, owner); | ||
228 | if (!owner_item) | ||
229 | goto out2; | ||
230 | |||
231 | list_add_tail(&memblock->owner_list, &owner_item->mem_blocks); | ||
232 | |||
233 | return memblock; | ||
234 | |||
235 | out2: | ||
236 | drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash); | ||
237 | out1: | ||
238 | kfree(memblock); | ||
239 | out: | ||
240 | sman_mm->free(sman_mm->private, tmp); | ||
241 | |||
242 | return NULL; | ||
243 | } | ||
244 | |||
245 | EXPORT_SYMBOL(drm_sman_alloc); | ||
246 | |||
247 | static void drm_sman_free(struct drm_memblock_item *item) | ||
248 | { | ||
249 | struct drm_sman *sman = item->sman; | ||
250 | |||
251 | list_del(&item->owner_list); | ||
252 | drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash); | ||
253 | item->mm->free(item->mm->private, item->mm_info); | ||
254 | kfree(item); | ||
255 | } | ||
256 | |||
257 | int drm_sman_free_key(struct drm_sman *sman, unsigned int key) | ||
258 | { | ||
259 | struct drm_hash_item *hash_item; | ||
260 | struct drm_memblock_item *memblock_item; | ||
261 | |||
262 | if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item)) | ||
263 | return -EINVAL; | ||
264 | |||
265 | memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item, | ||
266 | user_hash); | ||
267 | drm_sman_free(memblock_item); | ||
268 | return 0; | ||
269 | } | ||
270 | |||
271 | EXPORT_SYMBOL(drm_sman_free_key); | ||
272 | |||
273 | static void drm_sman_remove_owner(struct drm_sman *sman, | ||
274 | struct drm_owner_item *owner_item) | ||
275 | { | ||
276 | list_del(&owner_item->sman_list); | ||
277 | drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash); | ||
278 | kfree(owner_item); | ||
279 | } | ||
280 | |||
281 | int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner) | ||
282 | { | ||
283 | |||
284 | struct drm_hash_item *hash_item; | ||
285 | struct drm_owner_item *owner_item; | ||
286 | |||
287 | if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) { | ||
288 | return -1; | ||
289 | } | ||
290 | |||
291 | owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash); | ||
292 | if (owner_item->mem_blocks.next == &owner_item->mem_blocks) { | ||
293 | drm_sman_remove_owner(sman, owner_item); | ||
294 | return -1; | ||
295 | } | ||
296 | |||
297 | return 0; | ||
298 | } | ||
299 | |||
300 | EXPORT_SYMBOL(drm_sman_owner_clean); | ||
301 | |||
302 | static void drm_sman_do_owner_cleanup(struct drm_sman *sman, | ||
303 | struct drm_owner_item *owner_item) | ||
304 | { | ||
305 | struct drm_memblock_item *entry, *next; | ||
306 | |||
307 | list_for_each_entry_safe(entry, next, &owner_item->mem_blocks, | ||
308 | owner_list) { | ||
309 | drm_sman_free(entry); | ||
310 | } | ||
311 | drm_sman_remove_owner(sman, owner_item); | ||
312 | } | ||
313 | |||
314 | void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner) | ||
315 | { | ||
316 | |||
317 | struct drm_hash_item *hash_item; | ||
318 | struct drm_owner_item *owner_item; | ||
319 | |||
320 | if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) { | ||
321 | |||
322 | return; | ||
323 | } | ||
324 | |||
325 | owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash); | ||
326 | drm_sman_do_owner_cleanup(sman, owner_item); | ||
327 | } | ||
328 | |||
329 | EXPORT_SYMBOL(drm_sman_owner_cleanup); | ||
330 | |||
331 | void drm_sman_cleanup(struct drm_sman *sman) | ||
332 | { | ||
333 | struct drm_owner_item *entry, *next; | ||
334 | unsigned int i; | ||
335 | struct drm_sman_mm *sman_mm; | ||
336 | |||
337 | list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) { | ||
338 | drm_sman_do_owner_cleanup(sman, entry); | ||
339 | } | ||
340 | if (sman->mm) { | ||
341 | for (i = 0; i < sman->num_managers; ++i) { | ||
342 | sman_mm = &sman->mm[i]; | ||
343 | if (sman_mm->private) { | ||
344 | sman_mm->destroy(sman_mm->private); | ||
345 | sman_mm->private = NULL; | ||
346 | } | ||
347 | } | ||
348 | } | ||
349 | } | ||
350 | |||
351 | EXPORT_SYMBOL(drm_sman_cleanup); | ||
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 8f371e8d630f..f7c17b239833 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -222,8 +222,6 @@ static int i810_dma_cleanup(struct drm_device *dev)
 			pci_free_consistent(dev->pdev, PAGE_SIZE,
 					    dev_priv->hw_status_page,
 					    dev_priv->dma_status_page);
-			/* Need to rewrite hardware status page */
-			I810_WRITE(0x02080, 0x1ffff000);
 		}
 		kfree(dev->dev_private);
 		dev->dev_private = NULL;
@@ -888,7 +886,7 @@ static int i810_flush_queue(struct drm_device *dev)
 }
 
 /* Must be called with the lock held */
-static void i810_reclaim_buffers(struct drm_device *dev,
+void i810_driver_reclaim_buffers(struct drm_device *dev,
 				 struct drm_file *file_priv)
 {
 	struct drm_device_dma *dma = dev->dma;
@@ -1225,12 +1223,17 @@ void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
 		if (dev_priv->page_flipping)
 			i810_do_cleanup_pageflip(dev);
 	}
-}
 
-void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
-					struct drm_file *file_priv)
-{
-	i810_reclaim_buffers(dev, file_priv);
+	if (file_priv->master && file_priv->master->lock.hw_lock) {
+		drm_idlelock_take(&file_priv->master->lock);
+		i810_driver_reclaim_buffers(dev, file_priv);
+		drm_idlelock_release(&file_priv->master->lock);
+	} else {
+		/* master disappeared, clean up stuff anyway and hope nothing
+		 * goes wrong */
+		i810_driver_reclaim_buffers(dev, file_priv);
+	}
+
 }
 
 int i810_driver_dma_quiescent(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index ec12f7dc717a..053f1ee58393 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -63,7 +63,6 @@ static struct drm_driver driver = {
 	.lastclose = i810_driver_lastclose,
 	.preclose = i810_driver_preclose,
 	.device_is_agp = i810_driver_device_is_agp,
-	.reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
 	.dma_quiescent = i810_driver_dma_quiescent,
 	.ioctls = i810_ioctls,
 	.fops = &i810_driver_fops,
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
index c9339f481795..6e0acad9e0f5 100644
--- a/drivers/gpu/drm/i810/i810_drv.h
+++ b/drivers/gpu/drm/i810/i810_drv.h
@@ -116,14 +116,12 @@ typedef struct drm_i810_private {
 
 				/* i810_dma.c */
 extern int i810_driver_dma_quiescent(struct drm_device *dev);
-extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
-					       struct drm_file *file_priv);
+void i810_driver_reclaim_buffers(struct drm_device *dev,
+				 struct drm_file *file_priv);
 extern int i810_driver_load(struct drm_device *, unsigned long flags);
 extern void i810_driver_lastclose(struct drm_device *dev);
 extern void i810_driver_preclose(struct drm_device *dev,
 				 struct drm_file *file_priv);
-extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
-					       struct drm_file *file_priv);
 extern int i810_driver_device_is_agp(struct drm_device *dev);
 
 extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
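
For i810 the per-file reclaim no longer runs from the driver's reclaim_buffers_locked hook; it is folded into the preclose path shown in the i810_dma.c hunk above. The same code again, extracted for readability with an explanatory comment added (the comment block at the top is mine, the rest is quoted from the hunk):

	/* Reclaim at preclose time: if the master (and its hardware lock)
	 * still exists, bracket the reclaim with the idlelock so the engine
	 * cannot be racing us; if the master is already gone there is no
	 * lock left to take, so just clean up. */
	if (file_priv->master && file_priv->master->lock.hw_lock) {
		drm_idlelock_take(&file_priv->master->lock);
		i810_driver_reclaim_buffers(dev, file_priv);
		drm_idlelock_release(&file_priv->master->lock);
	} else {
		/* master disappeared, clean up stuff anyway and hope nothing
		 * goes wrong */
		i810_driver_reclaim_buffers(dev, file_priv);
	}
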
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index bda96a8cd939..06da063ece2e 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -48,9 +48,7 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
 
 	dev->dev_private = (void *)dev_priv;
 	dev_priv->chipset = chipset;
-	ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
-	if (ret)
-		kfree(dev_priv);
+	idr_init(&dev->object_name_idr);
 
 	return ret;
 }
@@ -59,7 +57,9 @@ static int sis_driver_unload(struct drm_device *dev)
 {
 	drm_sis_private_t *dev_priv = dev->dev_private;
 
-	drm_sman_takedown(&dev_priv->sman);
+	idr_remove_all(&dev_priv->object_idr);
+	idr_destroy(&dev_priv->object_idr);
+
 	kfree(dev_priv);
 
 	return 0;
@@ -76,10 +76,35 @@ static const struct file_operations sis_driver_fops = {
 	.llseek = noop_llseek,
 };
 
+static int sis_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct sis_file_private *file_priv;
+
+	DRM_DEBUG_DRIVER("\n");
+	file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+	if (!file_priv)
+		return -ENOMEM;
+
+	file->driver_priv = file_priv;
+
+	INIT_LIST_HEAD(&file_priv->obj_list);
+
+	return 0;
+}
+
+void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct sis_file_private *file_priv = file->driver_priv;
+
+	kfree(file_priv);
+}
+
 static struct drm_driver driver = {
 	.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
 	.load = sis_driver_load,
 	.unload = sis_driver_unload,
+	.open = sis_driver_open,
+	.postclose = sis_driver_postclose,
 	.dma_quiescent = sis_idle,
 	.reclaim_buffers = NULL,
 	.reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
diff --git a/drivers/gpu/drm/sis/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h
index 194303c177ad..573758b2d2d6 100644
--- a/drivers/gpu/drm/sis/sis_drv.h
+++ b/drivers/gpu/drm/sis/sis_drv.h
@@ -44,7 +44,7 @@ enum sis_family {
 	SIS_CHIP_315 = 1,
 };
 
-#include "drm_sman.h"
+#include "drm_mm.h"
 
 
 #define SIS_BASE (dev_priv->mmio)
@@ -54,12 +54,15 @@ enum sis_family {
 typedef struct drm_sis_private {
 	drm_local_map_t *mmio;
 	unsigned int idle_fault;
-	struct drm_sman sman;
 	unsigned int chipset;
 	int vram_initialized;
 	int agp_initialized;
 	unsigned long vram_offset;
 	unsigned long agp_offset;
+	struct drm_mm vram_mm;
+	struct drm_mm agp_mm;
+	/** Mapping of userspace keys to mm objects */
+	struct idr object_idr;
 } drm_sis_private_t;
 
 extern int sis_idle(struct drm_device *dev);
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 7fe2b63412ce..46cb2bce6cc5 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -41,40 +41,18 @@
 #define AGP_TYPE 1
 
 
+struct sis_memblock {
+	struct drm_mm_node mm_node;
+	struct sis_memreq req;
+	struct list_head owner_list;
+};
+
 #if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
 /* fb management via fb device */
 
 #define SIS_MM_ALIGN_SHIFT 0
 #define SIS_MM_ALIGN_MASK 0
 
-static void *sis_sman_mm_allocate(void *private, unsigned long size,
-				  unsigned alignment)
-{
-	struct sis_memreq req;
-
-	req.size = size;
-	sis_malloc(&req);
-	if (req.size == 0)
-		return NULL;
-	else
-		return (void *)(unsigned long)~req.offset;
-}
-
-static void sis_sman_mm_free(void *private, void *ref)
-{
-	sis_free(~((unsigned long)ref));
-}
-
-static void sis_sman_mm_destroy(void *private)
-{
-	;
-}
-
-static unsigned long sis_sman_mm_offset(void *private, void *ref)
-{
-	return ~((unsigned long)ref);
-}
-
 #else /* CONFIG_FB_SIS[_MODULE] */
 
 #define SIS_MM_ALIGN_SHIFT 4
@@ -86,30 +64,11 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
 {
 	drm_sis_private_t *dev_priv = dev->dev_private;
 	drm_sis_fb_t *fb = data;
-	int ret;
 
 	mutex_lock(&dev->struct_mutex);
-#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
-	{
-		struct drm_sman_mm sman_mm;
-		sman_mm.private = (void *)0xFFFFFFFF;
-		sman_mm.allocate = sis_sman_mm_allocate;
-		sman_mm.free = sis_sman_mm_free;
-		sman_mm.destroy = sis_sman_mm_destroy;
-		sman_mm.offset = sis_sman_mm_offset;
-		ret = drm_sman_set_manager(&dev_priv->sman, VIDEO_TYPE, &sman_mm);
-	}
-#else
-	ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0,
-				 fb->size >> SIS_MM_ALIGN_SHIFT);
-#endif
-
-	if (ret) {
-		DRM_ERROR("VRAM memory manager initialisation error\n");
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
-	}
+	/* Unconditionally init the drm_mm, even though we don't use it when the
+	 * fb sis driver is available - make cleanup easier. */
+	drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> SIS_MM_ALIGN_SHIFT);
 
 	dev_priv->vram_initialized = 1;
 	dev_priv->vram_offset = fb->offset;
@@ -120,13 +79,15 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
 	return 0;
 }
 
-static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
+static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
 			 void *data, int pool)
 {
 	drm_sis_private_t *dev_priv = dev->dev_private;
 	drm_sis_mem_t *mem = data;
-	int retval = 0;
-	struct drm_memblock_item *item;
+	int retval = 0, user_key;
+	struct sis_memblock *item;
+	struct sis_file_private *file_priv = file->driver_priv;
+	unsigned long offset;
 
 	mutex_lock(&dev->struct_mutex);
 
@@ -138,25 +99,68 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
 		return -EINVAL;
 	}
 
-	mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
-	item = drm_sman_alloc(&dev_priv->sman, pool, mem->size, 0,
-			      (unsigned long)file_priv);
+	item = kzalloc(sizeof(*item), GFP_KERNEL);
+	if (!item) {
+		retval = -ENOMEM;
+		goto fail_alloc;
+	}
 
-	mutex_unlock(&dev->struct_mutex);
-	if (item) {
-		mem->offset = ((pool == 0) ?
-			      dev_priv->vram_offset : dev_priv->agp_offset) +
-		    (item->mm->
-		     offset(item->mm, item->mm_info) << SIS_MM_ALIGN_SHIFT);
-		mem->free = item->user_hash.key;
-		mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
+	mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
+	if (pool == AGP_TYPE) {
+		retval = drm_mm_insert_node(&dev_priv->agp_mm,
+					    &item->mm_node,
+					    mem->size, 0);
+		offset = item->mm_node.start;
 	} else {
-		mem->offset = 0;
-		mem->size = 0;
-		mem->free = 0;
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+		item->req.size = mem->size;
+		sis_malloc(&item->req);
+		if (item->req.size == 0)
+			retval = -ENOMEM;
+		offset = item->req.offset;
+#else
+		retval = drm_mm_insert_node(&dev_priv->vram_mm,
+					    &item->mm_node,
+					    mem->size, 0);
+		offset = item->mm_node.start;
+#endif
+	}
+	if (retval)
+		goto fail_alloc;
+
+again:
+	if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
 		retval = -ENOMEM;
+		goto fail_idr;
 	}
 
+	retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
+	if (retval == -EAGAIN)
+		goto again;
+	if (retval)
+		goto fail_idr;
+
+	list_add(&item->owner_list, &file_priv->obj_list);
+	mutex_unlock(&dev->struct_mutex);
+
+	mem->offset = ((pool == 0) ?
+		      dev_priv->vram_offset : dev_priv->agp_offset) +
+		(offset << SIS_MM_ALIGN_SHIFT);
+	mem->free = user_key;
+	mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
+
+	return 0;
+
+fail_idr:
+	drm_mm_remove_node(&item->mm_node);
+fail_alloc:
+	kfree(item);
+	mutex_unlock(&dev->struct_mutex);
+
+	mem->offset = 0;
+	mem->size = 0;
+	mem->free = 0;
+
 	DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size,
 		  mem->offset);
 
@@ -167,10 +171,25 @@ static int sis_drm_free(struct drm_device *dev, void *data, struct drm_file *fil
 {
 	drm_sis_private_t *dev_priv = dev->dev_private;
 	drm_sis_mem_t *mem = data;
+	struct sis_memblock *obj;
 	int ret;
 
 	mutex_lock(&dev->struct_mutex);
-	ret = drm_sman_free_key(&dev_priv->sman, mem->free);
+	obj = idr_find(&dev_priv->object_idr, mem->free);
+	if (obj == NULL) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	idr_remove(&dev_priv->object_idr, mem->free);
+	list_del(&obj->owner_list);
+	if (drm_mm_node_allocated(&obj->mm_node))
+		drm_mm_remove_node(&obj->mm_node);
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+	else
+		sis_free(obj->req.offset);
+#endif
+	kfree(obj);
 	mutex_unlock(&dev->struct_mutex);
 	DRM_DEBUG("free = 0x%lx\n", mem->free);
 
@@ -188,18 +207,10 @@ static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
 {
 	drm_sis_private_t *dev_priv = dev->dev_private;
 	drm_sis_agp_t *agp = data;
-	int ret;
 	dev_priv = dev->dev_private;
 
 	mutex_lock(&dev->struct_mutex);
-	ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0,
-				 agp->size >> SIS_MM_ALIGN_SHIFT);
-
-	if (ret) {
-		DRM_ERROR("AGP memory manager initialisation error\n");
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
-	}
+	drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> SIS_MM_ALIGN_SHIFT);
 
 	dev_priv->agp_initialized = 1;
 	dev_priv->agp_offset = agp->offset;
@@ -293,20 +304,26 @@ void sis_lastclose(struct drm_device *dev)
 		return;
 
 	mutex_lock(&dev->struct_mutex);
-	drm_sman_cleanup(&dev_priv->sman);
-	dev_priv->vram_initialized = 0;
-	dev_priv->agp_initialized = 0;
+	if (dev_priv->vram_initialized) {
+		drm_mm_takedown(&dev_priv->vram_mm);
+		dev_priv->vram_initialized = 0;
+	}
+	if (dev_priv->agp_initialized) {
+		drm_mm_takedown(&dev_priv->agp_mm);
+		dev_priv->agp_initialized = 0;
+	}
 	dev_priv->mmio = NULL;
 	mutex_unlock(&dev->struct_mutex);
 }
 
 void sis_reclaim_buffers_locked(struct drm_device *dev,
-				struct drm_file *file_priv)
+				struct drm_file *file)
 {
-	drm_sis_private_t *dev_priv = dev->dev_private;
+	struct sis_file_private *file_priv = file->driver_priv;
+	struct sis_memblock *entry, *next;
 
 	mutex_lock(&dev->struct_mutex);
-	if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
+	if (list_empty(&file_priv->obj_list)) {
 		mutex_unlock(&dev->struct_mutex);
 		return;
 	}
@@ -314,7 +331,18 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
 	if (dev->driver->dma_quiescent)
 		dev->driver->dma_quiescent(dev);
 
-	drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
+
+	list_for_each_entry_safe(entry, next, &file_priv->obj_list,
+				 owner_list) {
+		list_del(&entry->owner_list);
+		if (drm_mm_node_allocated(&entry->mm_node))
+			drm_mm_remove_node(&entry->mm_node);
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+		else
+			sis_free(entry->req.offset);
+#endif
+		kfree(entry);
+	}
 	mutex_unlock(&dev->struct_mutex);
 	return;
 }
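
The allocation paths in sis_mm.c above (and via_mm.c below) hand userspace a small integer key instead of a hashed pointer, using the older two-step idr API. The same loop, extracted from the hunk above with explanatory comments added (the comments are mine; the remark that 0 stays the "no object" value is inferred from mem->free being set to 0 on the failure path):

	/* idr_pre_get() preallocates memory for the mapping; idr_get_new_above()
	 * then hands out a key >= 1, so 0 can keep meaning "no object" in the
	 * value returned to userspace.  -EAGAIN means the preallocation was
	 * consumed by a concurrent user, so preallocate and try again. */
again:
	if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
		retval = -ENOMEM;
		goto fail_idr;
	}

	retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
	if (retval == -EAGAIN)
		goto again;
	if (retval)
		goto fail_idr;
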
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index fb43fd368ce1..02661f35f7a0 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -30,6 +30,29 @@
 
 #include "drm_pciids.h"
 
+static int via_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct via_file_private *file_priv;
+
+	DRM_DEBUG_DRIVER("\n");
+	file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+	if (!file_priv)
+		return -ENOMEM;
+
+	file->driver_priv = file_priv;
+
+	INIT_LIST_HEAD(&file_priv->obj_list);
+
+	return 0;
+}
+
+void via_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct via_file_private *file_priv = file->driver_priv;
+
+	kfree(file_priv);
+}
+
 static struct pci_device_id pciidlist[] = {
 	viadrv_PCI_IDS
 };
@@ -51,6 +74,8 @@ static struct drm_driver driver = {
 		DRIVER_IRQ_SHARED,
 	.load = via_driver_load,
 	.unload = via_driver_unload,
+	.open = via_driver_open,
+	.postclose = via_driver_postclose,
 	.context_dtor = via_final_context,
 	.get_vblank_counter = via_get_vblank_counter,
 	.enable_vblank = via_enable_vblank,
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index 9cf87d912325..88edacc93006 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -24,7 +24,7 @@
 #ifndef _VIA_DRV_H_
 #define _VIA_DRV_H_
 
-#include "drm_sman.h"
+#include "drm_mm.h"
 #define DRIVER_AUTHOR "Various"
 
 #define DRIVER_NAME "via"
@@ -88,9 +88,12 @@ typedef struct drm_via_private {
 	uint32_t irq_pending_mask;
 	int *irq_map;
 	unsigned int idle_fault;
-	struct drm_sman sman;
 	int vram_initialized;
+	struct drm_mm vram_mm;
 	int agp_initialized;
+	struct drm_mm agp_mm;
+	/** Mapping of userspace keys to mm objects */
+	struct idr object_idr;
 	unsigned long vram_offset;
 	unsigned long agp_offset;
 	drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index 6cca9a709f7a..a2ab34365151 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -104,15 +104,10 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
 
 	dev_priv->chipset = chipset;
 
-	ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
-	if (ret) {
-		kfree(dev_priv);
-		return ret;
-	}
+	idr_init(&dev->object_name_idr);
 
 	ret = drm_vblank_init(dev, 1);
 	if (ret) {
-		drm_sman_takedown(&dev_priv->sman);
 		kfree(dev_priv);
 		return ret;
 	}
@@ -124,7 +119,8 @@ int via_driver_unload(struct drm_device *dev)
 {
 	drm_via_private_t *dev_priv = dev->dev_private;
 
-	drm_sman_takedown(&dev_priv->sman);
+	idr_remove_all(&dev_priv->object_idr);
+	idr_destroy(&dev_priv->object_idr);
 
 	kfree(dev_priv);
 
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 6cc2dadae3ef..bedb23d2ece0 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -28,26 +28,22 @@
 #include "drmP.h"
 #include "via_drm.h"
 #include "via_drv.h"
-#include "drm_sman.h"
 
 #define VIA_MM_ALIGN_SHIFT 4
 #define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1)
 
+struct via_memblock {
+	struct drm_mm_node mm_node;
+	struct list_head owner_list;
+};
+
 int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	drm_via_agp_t *agp = data;
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-	int ret;
 
 	mutex_lock(&dev->struct_mutex);
-	ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0,
-				 agp->size >> VIA_MM_ALIGN_SHIFT);
-
-	if (ret) {
-		DRM_ERROR("AGP memory manager initialisation error\n");
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
-	}
+	drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> VIA_MM_ALIGN_SHIFT);
 
 	dev_priv->agp_initialized = 1;
 	dev_priv->agp_offset = agp->offset;
@@ -61,17 +57,9 @@ int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	drm_via_fb_t *fb = data;
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-	int ret;
 
 	mutex_lock(&dev->struct_mutex);
-	ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0,
-				 fb->size >> VIA_MM_ALIGN_SHIFT);
-
-	if (ret) {
-		DRM_ERROR("VRAM memory manager initialisation error\n");
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
-	}
+	drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> VIA_MM_ALIGN_SHIFT);
 
 	dev_priv->vram_initialized = 1;
 	dev_priv->vram_offset = fb->offset;
@@ -108,19 +96,25 @@ void via_lastclose(struct drm_device *dev)
 		return;
 
 	mutex_lock(&dev->struct_mutex);
-	drm_sman_cleanup(&dev_priv->sman);
-	dev_priv->vram_initialized = 0;
-	dev_priv->agp_initialized = 0;
+	if (dev_priv->vram_initialized) {
+		drm_mm_takedown(&dev_priv->vram_mm);
+		dev_priv->vram_initialized = 0;
+	}
+	if (dev_priv->agp_initialized) {
+		drm_mm_takedown(&dev_priv->agp_mm);
+		dev_priv->agp_initialized = 0;
+	}
 	mutex_unlock(&dev->struct_mutex);
 }
 
 int via_mem_alloc(struct drm_device *dev, void *data,
-		  struct drm_file *file_priv)
+		  struct drm_file *file)
 {
 	drm_via_mem_t *mem = data;
-	int retval = 0;
-	struct drm_memblock_item *item;
+	int retval = 0, user_key;
+	struct via_memblock *item;
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	struct via_file_private *file_priv = file->driver_priv;
 	unsigned long tmpSize;
 
 	if (mem->type > VIA_MEM_AGP) {
@@ -136,24 +130,57 @@ int via_mem_alloc(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
+	item = kzalloc(sizeof(*item), GFP_KERNEL);
+	if (!item) {
+		retval = -ENOMEM;
+		goto fail_alloc;
+	}
+
 	tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
-	item = drm_sman_alloc(&dev_priv->sman, mem->type, tmpSize, 0,
-			      (unsigned long)file_priv);
-	mutex_unlock(&dev->struct_mutex);
-	if (item) {
-		mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
-			      dev_priv->vram_offset : dev_priv->agp_offset) +
-		    (item->mm->
-		     offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT);
-		mem->index = item->user_hash.key;
-	} else {
-		mem->offset = 0;
-		mem->size = 0;
-		mem->index = 0;
-		DRM_DEBUG("Video memory allocation failed\n");
+	if (mem->type == VIA_MEM_AGP)
+		retval = drm_mm_insert_node(&dev_priv->agp_mm,
+					    &item->mm_node,
+					    tmpSize, 0);
+	else
+		retval = drm_mm_insert_node(&dev_priv->vram_mm,
+					    &item->mm_node,
+					    tmpSize, 0);
+	if (retval)
+		goto fail_alloc;
+
+again:
+	if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
 		retval = -ENOMEM;
+		goto fail_idr;
 	}
 
+	retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
+	if (retval == -EAGAIN)
+		goto again;
+	if (retval)
+		goto fail_idr;
+
+	list_add(&item->owner_list, &file_priv->obj_list);
+	mutex_unlock(&dev->struct_mutex);
+
+	mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
+		      dev_priv->vram_offset : dev_priv->agp_offset) +
+		((item->mm_node.start) << VIA_MM_ALIGN_SHIFT);
+	mem->index = user_key;
+
+	return 0;
+
+fail_idr:
+	drm_mm_remove_node(&item->mm_node);
+fail_alloc:
+	kfree(item);
+	mutex_unlock(&dev->struct_mutex);
+
+	mem->offset = 0;
+	mem->size = 0;
+	mem->index = 0;
+	DRM_DEBUG("Video memory allocation failed\n");
+
 	return retval;
 }
 
@@ -161,11 +188,22 @@ int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	drm_via_private_t *dev_priv = dev->dev_private;
 	drm_via_mem_t *mem = data;
+	struct via_memblock *obj;
 	int ret;
 
 	mutex_lock(&dev->struct_mutex);
-	ret = drm_sman_free_key(&dev_priv->sman, mem->index);
+	obj = idr_find(&dev_priv->object_idr, mem->index);
+	if (obj == NULL) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	idr_remove(&dev_priv->object_idr, mem->index);
+	list_del(&obj->owner_list);
+	drm_mm_remove_node(&obj->mm_node);
+	kfree(obj);
 	mutex_unlock(&dev->struct_mutex);
+
 	DRM_DEBUG("free = 0x%lx\n", mem->index);
 
 	return ret;
@@ -173,12 +211,13 @@ int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
 
 
 void via_reclaim_buffers_locked(struct drm_device *dev,
-				struct drm_file *file_priv)
+				struct drm_file *file)
 {
-	drm_via_private_t *dev_priv = dev->dev_private;
+	struct via_file_private *file_priv = file->driver_priv;
+	struct via_memblock *entry, *next;
 
 	mutex_lock(&dev->struct_mutex);
-	if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
+	if (list_empty(&file_priv->obj_list)) {
 		mutex_unlock(&dev->struct_mutex);
 		return;
 	}
@@ -186,7 +225,12 @@ void via_reclaim_buffers_locked(struct drm_device *dev,
 	if (dev->driver->dma_quiescent)
 		dev->driver->dma_quiescent(dev);
 
-	drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
+	list_for_each_entry_safe(entry, next, &file_priv->obj_list,
+				 owner_list) {
+		list_del(&entry->owner_list);
+		drm_mm_remove_node(&entry->mm_node);
+		kfree(entry);
+	}
 	mutex_unlock(&dev->struct_mutex);
 	return;
 }
diff --git a/include/drm/drm_sman.h b/include/drm/drm_sman.h
deleted file mode 100644
index 08ecf83ad5d4..000000000000
--- a/include/drm/drm_sman.h
+++ /dev/null
@@ -1,176 +0,0 @@
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA. | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | * | ||
27 | **************************************************************************/ | ||
28 | /* | ||
29 | * Simple memory MANager interface that keeps track on allocate regions on a | ||
30 | * per "owner" basis. All regions associated with an "owner" can be released | ||
31 | * with a simple call. Typically if the "owner" exists. The owner is any | ||
32 | * "unsigned long" identifier. Can typically be a pointer to a file private | ||
33 | * struct or a context identifier. | ||
34 | * | ||
35 | * Authors: | ||
36 | * Thomas Hellström <thomas-at-tungstengraphics-dot-com> | ||
37 | */ | ||
38 | |||
39 | #ifndef DRM_SMAN_H | ||
40 | #define DRM_SMAN_H | ||
41 | |||
42 | #include "drmP.h" | ||
43 | #include "drm_hashtab.h" | ||
44 | |||
45 | /* | ||
46 | * A class that is an abstration of a simple memory allocator. | ||
47 | * The sman implementation provides a default such allocator | ||
48 | * using the drm_mm.c implementation. But the user can replace it. | ||
49 | * See the SiS implementation, which may use the SiS FB kernel module | ||
50 | * for memory management. | ||
51 | */ | ||
52 | |||
53 | struct drm_sman_mm { | ||
54 | /* private info. If allocated, needs to be destroyed by the destroy | ||
55 | function */ | ||
56 | void *private; | ||
57 | |||
58 | /* Allocate a memory block with given size and alignment. | ||
59 | Return an opaque reference to the memory block */ | ||
60 | |||
61 | void *(*allocate) (void *private, unsigned long size, | ||
62 | unsigned alignment); | ||
63 | |||
64 | /* Free a memory block. "ref" is the opaque reference that we got from | ||
65 | the "alloc" function */ | ||
66 | |||
67 | void (*free) (void *private, void *ref); | ||
68 | |||
69 | /* Free all resources associated with this allocator */ | ||
70 | |||
71 | void (*destroy) (void *private); | ||
72 | |||
73 | /* Return a memory offset from the opaque reference returned from the | ||
74 | "alloc" function */ | ||
75 | |||
76 | unsigned long (*offset) (void *private, void *ref); | ||
77 | }; | ||
78 | |||
79 | struct drm_memblock_item { | ||
80 | struct list_head owner_list; | ||
81 | struct drm_hash_item user_hash; | ||
82 | void *mm_info; | ||
83 | struct drm_sman_mm *mm; | ||
84 | struct drm_sman *sman; | ||
85 | }; | ||
86 | |||
87 | struct drm_sman { | ||
88 | struct drm_sman_mm *mm; | ||
89 | int num_managers; | ||
90 | struct drm_open_hash owner_hash_tab; | ||
91 | struct drm_open_hash user_hash_tab; | ||
92 | struct list_head owner_items; | ||
93 | }; | ||
94 | |||
95 | /* | ||
96 | * Take down a memory manager. This function should only be called after a | ||
97 | * successful init and after a call to drm_sman_cleanup. | ||
98 | */ | ||
99 | |||
100 | extern void drm_sman_takedown(struct drm_sman * sman); | ||
101 | |||
102 | /* | ||
103 | * Allocate structures for a manager. | ||
104 | * num_managers are the number of memory pools to manage. (VRAM, AGP, ....) | ||
105 | * user_order is the log2 of the number of buckets in the user hash table. | ||
106 | * set this to approximately log2 of the max number of memory regions | ||
107 | * that will be allocated for _all_ pools together. | ||
108 | * owner_order is the log2 of the number of buckets in the owner hash table. | ||
109 | * set this to approximately log2 of | ||
110 | * the number of client file connections that will | ||
111 | * be using the manager. | ||
112 | * | ||
113 | */ | ||
114 | |||
115 | extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers, | ||
116 | unsigned int user_order, unsigned int owner_order); | ||
117 | |||
118 | /* | ||
119 | * Initialize a drm_mm.c allocator. Should be called only once for each | ||
120 | * manager unless a customized allogator is used. | ||
121 | */ | ||
122 | |||
123 | extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager, | ||
124 | unsigned long start, unsigned long size); | ||
125 | |||
126 | /* | ||
127 | * Initialize a customized allocator for one of the managers. | ||
128 | * (See the SiS module). The object pointed to by "allocator" is copied, | ||
129 | * so it can be destroyed after this call. | ||
130 | */ | ||
131 | |||
132 | extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int mananger, | ||
133 | struct drm_sman_mm * allocator); | ||
134 | |||
135 | /* | ||
136 | * Allocate a memory block. Aligment is not implemented yet. | ||
137 | */ | ||
138 | |||
139 | extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman, | ||
140 | unsigned int manager, | ||
141 | unsigned long size, | ||
142 | unsigned alignment, | ||
143 | unsigned long owner); | ||
144 | /* | ||
145 | * Free a memory block identified by its user hash key. | ||
146 | */ | ||
147 | |||
148 | extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key); | ||
149 | |||
150 | /* | ||
151 | * returns 1 iff there are no stale memory blocks associated with this owner. | ||
152 | * Typically called to determine if we need to idle the hardware and call | ||
153 | * drm_sman_owner_cleanup. If there are no stale memory blocks, it removes all | ||
154 | * resources associated with owner. | ||
155 | */ | ||
156 | |||
157 | extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner); | ||
158 | |||
159 | /* | ||
160 | * Frees all stale memory blocks associated with this owner. Note that this | ||
161 | * requires that the hardware is finished with all blocks, so the graphics engine | ||
162 | * should be idled before this call is made. This function also frees | ||
163 | * any resources associated with "owner" and should be called when owner | ||
164 | * is not going to be referenced anymore. | ||
165 | */ | ||
166 | |||
167 | extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner); | ||
168 | |||
169 | /* | ||
170 | * Frees all stale memory blocks associated with the memory manager. | ||
171 | * See idling above. | ||
172 | */ | ||
173 | |||
174 | extern void drm_sman_cleanup(struct drm_sman * sman); | ||
175 | |||
176 | #endif | ||
diff --git a/include/drm/sis_drm.h b/include/drm/sis_drm.h
index 30f7b3827466..035b804dda6d 100644
--- a/include/drm/sis_drm.h
+++ b/include/drm/sis_drm.h
@@ -64,4 +64,8 @@ typedef struct {
 	unsigned int offset, size;
 } drm_sis_fb_t;
 
+struct sis_file_private {
+	struct list_head obj_list;
+};
+
 #endif /* __SIS_DRM_H__ */
diff --git a/include/drm/via_drm.h b/include/drm/via_drm.h
index fd11a5bd892d..79b3b6e0f6b3 100644
--- a/include/drm/via_drm.h
+++ b/include/drm/via_drm.h
@@ -274,4 +274,8 @@ typedef struct drm_via_dmablit {
 	drm_via_blitsync_t sync;
 } drm_via_dmablit_t;
 
+struct via_file_private {
+	struct list_head obj_list;
+};
+
 #endif /* _VIA_DRM_H_ */