author     Dave Airlie <airlied@starflyer.(none)>   2005-08-05 08:40:34 -0400
committer  Dave Airlie <airlied@linux.ie>           2005-08-05 08:40:34 -0400
commit     1fad99499afdd2730adb1d53413b91580b1f0662 (patch)
tree       0fec6e843bc6904166e8679c40836ef43d65b808 /drivers
parent     db215327c62c2db533afb322761fa04ea6244164 (diff)
drm: remove the gamma driver
The gamma driver has been broken for quite a while, it doesn't build,
we don't have a userspace, mine is in Ireland etc...
Signed-off-by: Dave Airlie <airlied@linux.ie>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/drm/Kconfig            7
-rw-r--r--  drivers/char/drm/Makefile           2
-rw-r--r--  drivers/char/drm/drm_pciids.h       4
-rw-r--r--  drivers/char/drm/gamma_context.h  492
-rw-r--r--  drivers/char/drm/gamma_dma.c      946
-rw-r--r--  drivers/char/drm/gamma_drm.h       90
-rw-r--r--  drivers/char/drm/gamma_drv.c       59
-rw-r--r--  drivers/char/drm/gamma_drv.h      147
-rw-r--r--  drivers/char/drm/gamma_lists.h    215
-rw-r--r--  drivers/char/drm/gamma_lock.h     140
-rw-r--r--  drivers/char/drm/gamma_old_dma.h  313
11 files changed, 0 insertions, 2415 deletions
diff --git a/drivers/char/drm/Kconfig b/drivers/char/drm/Kconfig
index d6c50312aec6..f31b9706ef65 100644
--- a/drivers/char/drm/Kconfig
+++ b/drivers/char/drm/Kconfig
@@ -23,13 +23,6 @@ config DRM_TDFX
           Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
           graphics card. If M is selected, the module will be called tdfx.
 
-config DRM_GAMMA
-        tristate "3dlabs GMX 2000"
-        depends on DRM && BROKEN
-        help
-          This is the old gamma driver, please tell me if it might actually
-          work.
-
 config DRM_R128
         tristate "ATI Rage 128"
         depends on DRM && PCI
diff --git a/drivers/char/drm/Makefile b/drivers/char/drm/Makefile
index ddd941045b1f..3f0cf8e9cc50 100644
--- a/drivers/char/drm/Makefile
+++ b/drivers/char/drm/Makefile
@@ -8,7 +8,6 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
         drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
         drm_sysfs.o
 
-gamma-objs := gamma_drv.o gamma_dma.o
 tdfx-objs := tdfx_drv.o
 r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
 mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
@@ -29,7 +28,6 @@ i915-objs += i915_ioc32.o
 endif
 
 obj-$(CONFIG_DRM) += drm.o
-obj-$(CONFIG_DRM_GAMMA) += gamma.o
 obj-$(CONFIG_DRM_TDFX) += tdfx.o
 obj-$(CONFIG_DRM_R128) += r128.o
 obj-$(CONFIG_DRM_RADEON)+= radeon.o
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
index 4f317ec092ee..8e264f9c1a1e 100644
--- a/drivers/char/drm/drm_pciids.h
+++ b/drivers/char/drm/drm_pciids.h
@@ -182,10 +182,6 @@
         {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
         {0, 0, 0}
 
-#define gamma_PCI_IDS \
-        {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-        {0, 0, 0}
-
 #define savage_PCI_IDS \
         {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
         {0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
diff --git a/drivers/char/drm/gamma_context.h b/drivers/char/drm/gamma_context.h
deleted file mode 100644
index d11b507f87ee..000000000000
--- a/drivers/char/drm/gamma_context.h
+++ /dev/null
@@ -1,492 +0,0 @@
1 | /* drm_context.h -- IOCTLs for generic contexts -*- linux-c -*- | ||
2 | * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com | ||
3 | * | ||
4 | * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. | ||
5 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | ||
6 | * All Rights Reserved. | ||
7 | * | ||
8 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
25 | * OTHER DEALINGS IN THE SOFTWARE. | ||
26 | * | ||
27 | * Authors: | ||
28 | * Rickard E. (Rik) Faith <faith@valinux.com> | ||
29 | * Gareth Hughes <gareth@valinux.com> | ||
30 | * ChangeLog: | ||
31 | * 2001-11-16 Torsten Duwe <duwe@caldera.de> | ||
32 | * added context constructor/destructor hooks, | ||
33 | * needed by SiS driver's memory management. | ||
34 | */ | ||
35 | |||
36 | /* ================================================================ | ||
37 | * Old-style context support -- only used by gamma. | ||
38 | */ | ||
39 | |||
40 | |||
41 | /* The drm_read and drm_write_string code (especially that which manages | ||
42 | the circular buffer), is based on Alessandro Rubini's LINUX DEVICE | ||
43 | DRIVERS (Cambridge: O'Reilly, 1998), pages 111-113. */ | ||
44 | |||
45 | ssize_t gamma_fops_read(struct file *filp, char __user *buf, size_t count, loff_t *off) | ||
46 | { | ||
47 | drm_file_t *priv = filp->private_data; | ||
48 | drm_device_t *dev = priv->dev; | ||
49 | int left; | ||
50 | int avail; | ||
51 | int send; | ||
52 | int cur; | ||
53 | |||
54 | DRM_DEBUG("%p, %p\n", dev->buf_rp, dev->buf_wp); | ||
55 | |||
56 | while (dev->buf_rp == dev->buf_wp) { | ||
57 | DRM_DEBUG(" sleeping\n"); | ||
58 | if (filp->f_flags & O_NONBLOCK) { | ||
59 | return -EAGAIN; | ||
60 | } | ||
61 | interruptible_sleep_on(&dev->buf_readers); | ||
62 | if (signal_pending(current)) { | ||
63 | DRM_DEBUG(" interrupted\n"); | ||
64 | return -ERESTARTSYS; | ||
65 | } | ||
66 | DRM_DEBUG(" awake\n"); | ||
67 | } | ||
68 | |||
69 | left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ; | ||
70 | avail = DRM_BSZ - left; | ||
71 | send = DRM_MIN(avail, count); | ||
72 | |||
73 | while (send) { | ||
74 | if (dev->buf_wp > dev->buf_rp) { | ||
75 | cur = DRM_MIN(send, dev->buf_wp - dev->buf_rp); | ||
76 | } else { | ||
77 | cur = DRM_MIN(send, dev->buf_end - dev->buf_rp); | ||
78 | } | ||
79 | if (copy_to_user(buf, dev->buf_rp, cur)) | ||
80 | return -EFAULT; | ||
81 | dev->buf_rp += cur; | ||
82 | if (dev->buf_rp == dev->buf_end) dev->buf_rp = dev->buf; | ||
83 | send -= cur; | ||
84 | } | ||
85 | |||
86 | wake_up_interruptible(&dev->buf_writers); | ||
87 | return DRM_MIN(avail, count); | ||
88 | } | ||
89 | |||
90 | |||
91 | /* In an incredibly convoluted setup, the kernel module actually calls | ||
92 | * back into the X server to perform context switches on behalf of the | ||
93 | * 3d clients. | ||
94 | */ | ||
95 | int DRM(write_string)(drm_device_t *dev, const char *s) | ||
96 | { | ||
97 | int left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ; | ||
98 | int send = strlen(s); | ||
99 | int count; | ||
100 | |||
101 | DRM_DEBUG("%d left, %d to send (%p, %p)\n", | ||
102 | left, send, dev->buf_rp, dev->buf_wp); | ||
103 | |||
104 | if (left == 1 || dev->buf_wp != dev->buf_rp) { | ||
105 | DRM_ERROR("Buffer not empty (%d left, wp = %p, rp = %p)\n", | ||
106 | left, | ||
107 | dev->buf_wp, | ||
108 | dev->buf_rp); | ||
109 | } | ||
110 | |||
111 | while (send) { | ||
112 | if (dev->buf_wp >= dev->buf_rp) { | ||
113 | count = DRM_MIN(send, dev->buf_end - dev->buf_wp); | ||
114 | if (count == left) --count; /* Leave a hole */ | ||
115 | } else { | ||
116 | count = DRM_MIN(send, dev->buf_rp - dev->buf_wp - 1); | ||
117 | } | ||
118 | strncpy(dev->buf_wp, s, count); | ||
119 | dev->buf_wp += count; | ||
120 | if (dev->buf_wp == dev->buf_end) dev->buf_wp = dev->buf; | ||
121 | send -= count; | ||
122 | } | ||
123 | |||
124 | if (dev->buf_async) kill_fasync(&dev->buf_async, SIGIO, POLL_IN); | ||
125 | |||
126 | DRM_DEBUG("waking\n"); | ||
127 | wake_up_interruptible(&dev->buf_readers); | ||
128 | return 0; | ||
129 | } | ||
130 | |||
131 | unsigned int gamma_fops_poll(struct file *filp, struct poll_table_struct *wait) | ||
132 | { | ||
133 | drm_file_t *priv = filp->private_data; | ||
134 | drm_device_t *dev = priv->dev; | ||
135 | |||
136 | poll_wait(filp, &dev->buf_readers, wait); | ||
137 | if (dev->buf_wp != dev->buf_rp) return POLLIN | POLLRDNORM; | ||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | int DRM(context_switch)(drm_device_t *dev, int old, int new) | ||
142 | { | ||
143 | char buf[64]; | ||
144 | drm_queue_t *q; | ||
145 | |||
146 | if (test_and_set_bit(0, &dev->context_flag)) { | ||
147 | DRM_ERROR("Reentering -- FIXME\n"); | ||
148 | return -EBUSY; | ||
149 | } | ||
150 | |||
151 | DRM_DEBUG("Context switch from %d to %d\n", old, new); | ||
152 | |||
153 | if (new >= dev->queue_count) { | ||
154 | clear_bit(0, &dev->context_flag); | ||
155 | return -EINVAL; | ||
156 | } | ||
157 | |||
158 | if (new == dev->last_context) { | ||
159 | clear_bit(0, &dev->context_flag); | ||
160 | return 0; | ||
161 | } | ||
162 | |||
163 | q = dev->queuelist[new]; | ||
164 | atomic_inc(&q->use_count); | ||
165 | if (atomic_read(&q->use_count) == 1) { | ||
166 | atomic_dec(&q->use_count); | ||
167 | clear_bit(0, &dev->context_flag); | ||
168 | return -EINVAL; | ||
169 | } | ||
170 | |||
171 | /* This causes the X server to wake up & do a bunch of hardware | ||
172 | * interaction to actually effect the context switch. | ||
173 | */ | ||
174 | sprintf(buf, "C %d %d\n", old, new); | ||
175 | DRM(write_string)(dev, buf); | ||
176 | |||
177 | atomic_dec(&q->use_count); | ||
178 | |||
179 | return 0; | ||
180 | } | ||
181 | |||
182 | int DRM(context_switch_complete)(drm_device_t *dev, int new) | ||
183 | { | ||
184 | drm_device_dma_t *dma = dev->dma; | ||
185 | |||
186 | dev->last_context = new; /* PRE/POST: This is the _only_ writer. */ | ||
187 | dev->last_switch = jiffies; | ||
188 | |||
189 | if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { | ||
190 | DRM_ERROR("Lock isn't held after context switch\n"); | ||
191 | } | ||
192 | |||
193 | if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) { | ||
194 | if (DRM(lock_free)(dev, &dev->lock.hw_lock->lock, | ||
195 | DRM_KERNEL_CONTEXT)) { | ||
196 | DRM_ERROR("Cannot free lock\n"); | ||
197 | } | ||
198 | } | ||
199 | |||
200 | clear_bit(0, &dev->context_flag); | ||
201 | wake_up_interruptible(&dev->context_wait); | ||
202 | |||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | static int DRM(init_queue)(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx) | ||
207 | { | ||
208 | DRM_DEBUG("\n"); | ||
209 | |||
210 | if (atomic_read(&q->use_count) != 1 | ||
211 | || atomic_read(&q->finalization) | ||
212 | || atomic_read(&q->block_count)) { | ||
213 | DRM_ERROR("New queue is already in use: u%d f%d b%d\n", | ||
214 | atomic_read(&q->use_count), | ||
215 | atomic_read(&q->finalization), | ||
216 | atomic_read(&q->block_count)); | ||
217 | } | ||
218 | |||
219 | atomic_set(&q->finalization, 0); | ||
220 | atomic_set(&q->block_count, 0); | ||
221 | atomic_set(&q->block_read, 0); | ||
222 | atomic_set(&q->block_write, 0); | ||
223 | atomic_set(&q->total_queued, 0); | ||
224 | atomic_set(&q->total_flushed, 0); | ||
225 | atomic_set(&q->total_locks, 0); | ||
226 | |||
227 | init_waitqueue_head(&q->write_queue); | ||
228 | init_waitqueue_head(&q->read_queue); | ||
229 | init_waitqueue_head(&q->flush_queue); | ||
230 | |||
231 | q->flags = ctx->flags; | ||
232 | |||
233 | DRM(waitlist_create)(&q->waitlist, dev->dma->buf_count); | ||
234 | |||
235 | return 0; | ||
236 | } | ||
237 | |||
238 | |||
239 | /* drm_alloc_queue: | ||
240 | PRE: 1) dev->queuelist[0..dev->queue_count] is allocated and will not | ||
241 | disappear (so all deallocation must be done after IOCTLs are off) | ||
242 | 2) dev->queue_count < dev->queue_slots | ||
243 | 3) dev->queuelist[i].use_count == 0 and | ||
244 | dev->queuelist[i].finalization == 0 if i not in use | ||
245 | POST: 1) dev->queuelist[i].use_count == 1 | ||
246 | 2) dev->queue_count < dev->queue_slots */ | ||
247 | |||
248 | static int DRM(alloc_queue)(drm_device_t *dev) | ||
249 | { | ||
250 | int i; | ||
251 | drm_queue_t *queue; | ||
252 | int oldslots; | ||
253 | int newslots; | ||
254 | /* Check for a free queue */ | ||
255 | for (i = 0; i < dev->queue_count; i++) { | ||
256 | atomic_inc(&dev->queuelist[i]->use_count); | ||
257 | if (atomic_read(&dev->queuelist[i]->use_count) == 1 | ||
258 | && !atomic_read(&dev->queuelist[i]->finalization)) { | ||
259 | DRM_DEBUG("%d (free)\n", i); | ||
260 | return i; | ||
261 | } | ||
262 | atomic_dec(&dev->queuelist[i]->use_count); | ||
263 | } | ||
264 | /* Allocate a new queue */ | ||
265 | down(&dev->struct_sem); | ||
266 | |||
267 | queue = DRM(alloc)(sizeof(*queue), DRM_MEM_QUEUES); | ||
268 | memset(queue, 0, sizeof(*queue)); | ||
269 | atomic_set(&queue->use_count, 1); | ||
270 | |||
271 | ++dev->queue_count; | ||
272 | if (dev->queue_count >= dev->queue_slots) { | ||
273 | oldslots = dev->queue_slots * sizeof(*dev->queuelist); | ||
274 | if (!dev->queue_slots) dev->queue_slots = 1; | ||
275 | dev->queue_slots *= 2; | ||
276 | newslots = dev->queue_slots * sizeof(*dev->queuelist); | ||
277 | |||
278 | dev->queuelist = DRM(realloc)(dev->queuelist, | ||
279 | oldslots, | ||
280 | newslots, | ||
281 | DRM_MEM_QUEUES); | ||
282 | if (!dev->queuelist) { | ||
283 | up(&dev->struct_sem); | ||
284 | DRM_DEBUG("out of memory\n"); | ||
285 | return -ENOMEM; | ||
286 | } | ||
287 | } | ||
288 | dev->queuelist[dev->queue_count-1] = queue; | ||
289 | |||
290 | up(&dev->struct_sem); | ||
291 | DRM_DEBUG("%d (new)\n", dev->queue_count - 1); | ||
292 | return dev->queue_count - 1; | ||
293 | } | ||
294 | |||
295 | int DRM(resctx)(struct inode *inode, struct file *filp, | ||
296 | unsigned int cmd, unsigned long arg) | ||
297 | { | ||
298 | drm_ctx_res_t __user *argp = (void __user *)arg; | ||
299 | drm_ctx_res_t res; | ||
300 | drm_ctx_t ctx; | ||
301 | int i; | ||
302 | |||
303 | DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS); | ||
304 | if (copy_from_user(&res, argp, sizeof(res))) | ||
305 | return -EFAULT; | ||
306 | if (res.count >= DRM_RESERVED_CONTEXTS) { | ||
307 | memset(&ctx, 0, sizeof(ctx)); | ||
308 | for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { | ||
309 | ctx.handle = i; | ||
310 | if (copy_to_user(&res.contexts[i], | ||
311 | &i, | ||
312 | sizeof(i))) | ||
313 | return -EFAULT; | ||
314 | } | ||
315 | } | ||
316 | res.count = DRM_RESERVED_CONTEXTS; | ||
317 | if (copy_to_user(argp, &res, sizeof(res))) | ||
318 | return -EFAULT; | ||
319 | return 0; | ||
320 | } | ||
321 | |||
322 | int DRM(addctx)(struct inode *inode, struct file *filp, | ||
323 | unsigned int cmd, unsigned long arg) | ||
324 | { | ||
325 | drm_file_t *priv = filp->private_data; | ||
326 | drm_device_t *dev = priv->dev; | ||
327 | drm_ctx_t ctx; | ||
328 | drm_ctx_t __user *argp = (void __user *)arg; | ||
329 | |||
330 | if (copy_from_user(&ctx, argp, sizeof(ctx))) | ||
331 | return -EFAULT; | ||
332 | if ((ctx.handle = DRM(alloc_queue)(dev)) == DRM_KERNEL_CONTEXT) { | ||
333 | /* Init kernel's context and get a new one. */ | ||
334 | DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx); | ||
335 | ctx.handle = DRM(alloc_queue)(dev); | ||
336 | } | ||
337 | DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx); | ||
338 | DRM_DEBUG("%d\n", ctx.handle); | ||
339 | if (copy_to_user(argp, &ctx, sizeof(ctx))) | ||
340 | return -EFAULT; | ||
341 | return 0; | ||
342 | } | ||
343 | |||
344 | int DRM(modctx)(struct inode *inode, struct file *filp, | ||
345 | unsigned int cmd, unsigned long arg) | ||
346 | { | ||
347 | drm_file_t *priv = filp->private_data; | ||
348 | drm_device_t *dev = priv->dev; | ||
349 | drm_ctx_t ctx; | ||
350 | drm_queue_t *q; | ||
351 | |||
352 | if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx))) | ||
353 | return -EFAULT; | ||
354 | |||
355 | DRM_DEBUG("%d\n", ctx.handle); | ||
356 | |||
357 | if (ctx.handle < 0 || ctx.handle >= dev->queue_count) return -EINVAL; | ||
358 | q = dev->queuelist[ctx.handle]; | ||
359 | |||
360 | atomic_inc(&q->use_count); | ||
361 | if (atomic_read(&q->use_count) == 1) { | ||
362 | /* No longer in use */ | ||
363 | atomic_dec(&q->use_count); | ||
364 | return -EINVAL; | ||
365 | } | ||
366 | |||
367 | if (DRM_BUFCOUNT(&q->waitlist)) { | ||
368 | atomic_dec(&q->use_count); | ||
369 | return -EBUSY; | ||
370 | } | ||
371 | |||
372 | q->flags = ctx.flags; | ||
373 | |||
374 | atomic_dec(&q->use_count); | ||
375 | return 0; | ||
376 | } | ||
377 | |||
378 | int DRM(getctx)(struct inode *inode, struct file *filp, | ||
379 | unsigned int cmd, unsigned long arg) | ||
380 | { | ||
381 | drm_file_t *priv = filp->private_data; | ||
382 | drm_device_t *dev = priv->dev; | ||
383 | drm_ctx_t __user *argp = (void __user *)arg; | ||
384 | drm_ctx_t ctx; | ||
385 | drm_queue_t *q; | ||
386 | |||
387 | if (copy_from_user(&ctx, argp, sizeof(ctx))) | ||
388 | return -EFAULT; | ||
389 | |||
390 | DRM_DEBUG("%d\n", ctx.handle); | ||
391 | |||
392 | if (ctx.handle >= dev->queue_count) return -EINVAL; | ||
393 | q = dev->queuelist[ctx.handle]; | ||
394 | |||
395 | atomic_inc(&q->use_count); | ||
396 | if (atomic_read(&q->use_count) == 1) { | ||
397 | /* No longer in use */ | ||
398 | atomic_dec(&q->use_count); | ||
399 | return -EINVAL; | ||
400 | } | ||
401 | |||
402 | ctx.flags = q->flags; | ||
403 | atomic_dec(&q->use_count); | ||
404 | |||
405 | if (copy_to_user(argp, &ctx, sizeof(ctx))) | ||
406 | return -EFAULT; | ||
407 | |||
408 | return 0; | ||
409 | } | ||
410 | |||
411 | int DRM(switchctx)(struct inode *inode, struct file *filp, | ||
412 | unsigned int cmd, unsigned long arg) | ||
413 | { | ||
414 | drm_file_t *priv = filp->private_data; | ||
415 | drm_device_t *dev = priv->dev; | ||
416 | drm_ctx_t ctx; | ||
417 | |||
418 | if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx))) | ||
419 | return -EFAULT; | ||
420 | DRM_DEBUG("%d\n", ctx.handle); | ||
421 | return DRM(context_switch)(dev, dev->last_context, ctx.handle); | ||
422 | } | ||
423 | |||
424 | int DRM(newctx)(struct inode *inode, struct file *filp, | ||
425 | unsigned int cmd, unsigned long arg) | ||
426 | { | ||
427 | drm_file_t *priv = filp->private_data; | ||
428 | drm_device_t *dev = priv->dev; | ||
429 | drm_ctx_t ctx; | ||
430 | |||
431 | if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx))) | ||
432 | return -EFAULT; | ||
433 | DRM_DEBUG("%d\n", ctx.handle); | ||
434 | DRM(context_switch_complete)(dev, ctx.handle); | ||
435 | |||
436 | return 0; | ||
437 | } | ||
438 | |||
439 | int DRM(rmctx)(struct inode *inode, struct file *filp, | ||
440 | unsigned int cmd, unsigned long arg) | ||
441 | { | ||
442 | drm_file_t *priv = filp->private_data; | ||
443 | drm_device_t *dev = priv->dev; | ||
444 | drm_ctx_t ctx; | ||
445 | drm_queue_t *q; | ||
446 | drm_buf_t *buf; | ||
447 | |||
448 | if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx))) | ||
449 | return -EFAULT; | ||
450 | DRM_DEBUG("%d\n", ctx.handle); | ||
451 | |||
452 | if (ctx.handle >= dev->queue_count) return -EINVAL; | ||
453 | q = dev->queuelist[ctx.handle]; | ||
454 | |||
455 | atomic_inc(&q->use_count); | ||
456 | if (atomic_read(&q->use_count) == 1) { | ||
457 | /* No longer in use */ | ||
458 | atomic_dec(&q->use_count); | ||
459 | return -EINVAL; | ||
460 | } | ||
461 | |||
462 | atomic_inc(&q->finalization); /* Mark queue in finalization state */ | ||
463 | atomic_sub(2, &q->use_count); /* Mark queue as unused (pending | ||
464 | finalization) */ | ||
465 | |||
466 | while (test_and_set_bit(0, &dev->interrupt_flag)) { | ||
467 | schedule(); | ||
468 | if (signal_pending(current)) { | ||
469 | clear_bit(0, &dev->interrupt_flag); | ||
470 | return -EINTR; | ||
471 | } | ||
472 | } | ||
473 | /* Remove queued buffers */ | ||
474 | while ((buf = DRM(waitlist_get)(&q->waitlist))) { | ||
475 | DRM(free_buffer)(dev, buf); | ||
476 | } | ||
477 | clear_bit(0, &dev->interrupt_flag); | ||
478 | |||
479 | /* Wakeup blocked processes */ | ||
480 | wake_up_interruptible(&q->read_queue); | ||
481 | wake_up_interruptible(&q->write_queue); | ||
482 | wake_up_interruptible(&q->flush_queue); | ||
483 | |||
484 | /* Finalization over. Queue is made | ||
485 | available when both use_count and | ||
486 | finalization become 0, which won't | ||
487 | happen until all the waiting processes | ||
488 | stop waiting. */ | ||
489 | atomic_dec(&q->finalization); | ||
490 | return 0; | ||
491 | } | ||
492 | |||
diff --git a/drivers/char/drm/gamma_dma.c b/drivers/char/drm/gamma_dma.c
deleted file mode 100644
index e486fb8d31e9..000000000000
--- a/drivers/char/drm/gamma_dma.c
+++ /dev/null
@@ -1,946 +0,0 @@
1 | /* gamma_dma.c -- DMA support for GMX 2000 -*- linux-c -*- | ||
2 | * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com | ||
3 | * | ||
4 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. | ||
5 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | ||
6 | * All Rights Reserved. | ||
7 | * | ||
8 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
25 | * DEALINGS IN THE SOFTWARE. | ||
26 | * | ||
27 | * Authors: | ||
28 | * Rickard E. (Rik) Faith <faith@valinux.com> | ||
29 | * | ||
30 | */ | ||
31 | |||
32 | #include "gamma.h" | ||
33 | #include "drmP.h" | ||
34 | #include "drm.h" | ||
35 | #include "gamma_drm.h" | ||
36 | #include "gamma_drv.h" | ||
37 | |||
38 | #include <linux/interrupt.h> /* For task queue support */ | ||
39 | #include <linux/delay.h> | ||
40 | |||
41 | static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address, | ||
42 | unsigned long length) | ||
43 | { | ||
44 | drm_gamma_private_t *dev_priv = | ||
45 | (drm_gamma_private_t *)dev->dev_private; | ||
46 | mb(); | ||
47 | while ( GAMMA_READ(GAMMA_INFIFOSPACE) < 2) | ||
48 | cpu_relax(); | ||
49 | |||
50 | GAMMA_WRITE(GAMMA_DMAADDRESS, address); | ||
51 | |||
52 | while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4) | ||
53 | cpu_relax(); | ||
54 | |||
55 | GAMMA_WRITE(GAMMA_DMACOUNT, length / 4); | ||
56 | } | ||
57 | |||
58 | void gamma_dma_quiescent_single(drm_device_t *dev) | ||
59 | { | ||
60 | drm_gamma_private_t *dev_priv = | ||
61 | (drm_gamma_private_t *)dev->dev_private; | ||
62 | while (GAMMA_READ(GAMMA_DMACOUNT)) | ||
63 | cpu_relax(); | ||
64 | |||
65 | while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2) | ||
66 | cpu_relax(); | ||
67 | |||
68 | GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10); | ||
69 | GAMMA_WRITE(GAMMA_SYNC, 0); | ||
70 | |||
71 | do { | ||
72 | while (!GAMMA_READ(GAMMA_OUTFIFOWORDS)) | ||
73 | cpu_relax(); | ||
74 | } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG); | ||
75 | } | ||
76 | |||
77 | void gamma_dma_quiescent_dual(drm_device_t *dev) | ||
78 | { | ||
79 | drm_gamma_private_t *dev_priv = | ||
80 | (drm_gamma_private_t *)dev->dev_private; | ||
81 | while (GAMMA_READ(GAMMA_DMACOUNT)) | ||
82 | cpu_relax(); | ||
83 | |||
84 | while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3) | ||
85 | cpu_relax(); | ||
86 | |||
87 | GAMMA_WRITE(GAMMA_BROADCASTMASK, 3); | ||
88 | GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10); | ||
89 | GAMMA_WRITE(GAMMA_SYNC, 0); | ||
90 | |||
91 | /* Read from first MX */ | ||
92 | do { | ||
93 | while (!GAMMA_READ(GAMMA_OUTFIFOWORDS)) | ||
94 | cpu_relax(); | ||
95 | } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG); | ||
96 | |||
97 | /* Read from second MX */ | ||
98 | do { | ||
99 | while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000)) | ||
100 | cpu_relax(); | ||
101 | } while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG); | ||
102 | } | ||
103 | |||
104 | void gamma_dma_ready(drm_device_t *dev) | ||
105 | { | ||
106 | drm_gamma_private_t *dev_priv = | ||
107 | (drm_gamma_private_t *)dev->dev_private; | ||
108 | while (GAMMA_READ(GAMMA_DMACOUNT)) | ||
109 | cpu_relax(); | ||
110 | } | ||
111 | |||
112 | static inline int gamma_dma_is_ready(drm_device_t *dev) | ||
113 | { | ||
114 | drm_gamma_private_t *dev_priv = | ||
115 | (drm_gamma_private_t *)dev->dev_private; | ||
116 | return (!GAMMA_READ(GAMMA_DMACOUNT)); | ||
117 | } | ||
118 | |||
119 | irqreturn_t gamma_driver_irq_handler( DRM_IRQ_ARGS ) | ||
120 | { | ||
121 | drm_device_t *dev = (drm_device_t *)arg; | ||
122 | drm_device_dma_t *dma = dev->dma; | ||
123 | drm_gamma_private_t *dev_priv = | ||
124 | (drm_gamma_private_t *)dev->dev_private; | ||
125 | |||
126 | /* FIXME: should check whether we're actually interested in the interrupt? */ | ||
127 | atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */ | ||
128 | |||
129 | while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3) | ||
130 | cpu_relax(); | ||
131 | |||
132 | GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0x05S */ | ||
133 | GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8); | ||
134 | GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001); | ||
135 | if (gamma_dma_is_ready(dev)) { | ||
136 | /* Free previous buffer */ | ||
137 | if (test_and_set_bit(0, &dev->dma_flag)) | ||
138 | return IRQ_HANDLED; | ||
139 | if (dma->this_buffer) { | ||
140 | gamma_free_buffer(dev, dma->this_buffer); | ||
141 | dma->this_buffer = NULL; | ||
142 | } | ||
143 | clear_bit(0, &dev->dma_flag); | ||
144 | |||
145 | /* Dispatch new buffer */ | ||
146 | schedule_work(&dev->work); | ||
147 | } | ||
148 | return IRQ_HANDLED; | ||
149 | } | ||
150 | |||
151 | /* Only called by gamma_dma_schedule. */ | ||
152 | static int gamma_do_dma(drm_device_t *dev, int locked) | ||
153 | { | ||
154 | unsigned long address; | ||
155 | unsigned long length; | ||
156 | drm_buf_t *buf; | ||
157 | int retcode = 0; | ||
158 | drm_device_dma_t *dma = dev->dma; | ||
159 | |||
160 | if (test_and_set_bit(0, &dev->dma_flag)) return -EBUSY; | ||
161 | |||
162 | |||
163 | if (!dma->next_buffer) { | ||
164 | DRM_ERROR("No next_buffer\n"); | ||
165 | clear_bit(0, &dev->dma_flag); | ||
166 | return -EINVAL; | ||
167 | } | ||
168 | |||
169 | buf = dma->next_buffer; | ||
170 | /* WE NOW ARE ON LOGICAL PAGES!! - using page table setup in dma_init */ | ||
171 | /* So we pass the buffer index value into the physical page offset */ | ||
172 | address = buf->idx << 12; | ||
173 | length = buf->used; | ||
174 | |||
175 | DRM_DEBUG("context %d, buffer %d (%ld bytes)\n", | ||
176 | buf->context, buf->idx, length); | ||
177 | |||
178 | if (buf->list == DRM_LIST_RECLAIM) { | ||
179 | gamma_clear_next_buffer(dev); | ||
180 | gamma_free_buffer(dev, buf); | ||
181 | clear_bit(0, &dev->dma_flag); | ||
182 | return -EINVAL; | ||
183 | } | ||
184 | |||
185 | if (!length) { | ||
186 | DRM_ERROR("0 length buffer\n"); | ||
187 | gamma_clear_next_buffer(dev); | ||
188 | gamma_free_buffer(dev, buf); | ||
189 | clear_bit(0, &dev->dma_flag); | ||
190 | return 0; | ||
191 | } | ||
192 | |||
193 | if (!gamma_dma_is_ready(dev)) { | ||
194 | clear_bit(0, &dev->dma_flag); | ||
195 | return -EBUSY; | ||
196 | } | ||
197 | |||
198 | if (buf->while_locked) { | ||
199 | if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { | ||
200 | DRM_ERROR("Dispatching buffer %d from pid %d" | ||
201 | " \"while locked\", but no lock held\n", | ||
202 | buf->idx, current->pid); | ||
203 | } | ||
204 | } else { | ||
205 | if (!locked && !gamma_lock_take(&dev->lock.hw_lock->lock, | ||
206 | DRM_KERNEL_CONTEXT)) { | ||
207 | clear_bit(0, &dev->dma_flag); | ||
208 | return -EBUSY; | ||
209 | } | ||
210 | } | ||
211 | |||
212 | if (dev->last_context != buf->context | ||
213 | && !(dev->queuelist[buf->context]->flags | ||
214 | & _DRM_CONTEXT_PRESERVED)) { | ||
215 | /* PRE: dev->last_context != buf->context */ | ||
216 | if (DRM(context_switch)(dev, dev->last_context, | ||
217 | buf->context)) { | ||
218 | DRM(clear_next_buffer)(dev); | ||
219 | DRM(free_buffer)(dev, buf); | ||
220 | } | ||
221 | retcode = -EBUSY; | ||
222 | goto cleanup; | ||
223 | |||
224 | /* POST: we will wait for the context | ||
225 | switch and will dispatch on a later call | ||
226 | when dev->last_context == buf->context. | ||
227 | NOTE WE HOLD THE LOCK THROUGHOUT THIS | ||
228 | TIME! */ | ||
229 | } | ||
230 | |||
231 | gamma_clear_next_buffer(dev); | ||
232 | buf->pending = 1; | ||
233 | buf->waiting = 0; | ||
234 | buf->list = DRM_LIST_PEND; | ||
235 | |||
236 | /* WE NOW ARE ON LOGICAL PAGES!!! - overriding address */ | ||
237 | address = buf->idx << 12; | ||
238 | |||
239 | gamma_dma_dispatch(dev, address, length); | ||
240 | gamma_free_buffer(dev, dma->this_buffer); | ||
241 | dma->this_buffer = buf; | ||
242 | |||
243 | atomic_inc(&dev->counts[7]); /* _DRM_STAT_DMA */ | ||
244 | atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */ | ||
245 | |||
246 | if (!buf->while_locked && !dev->context_flag && !locked) { | ||
247 | if (gamma_lock_free(dev, &dev->lock.hw_lock->lock, | ||
248 | DRM_KERNEL_CONTEXT)) { | ||
249 | DRM_ERROR("\n"); | ||
250 | } | ||
251 | } | ||
252 | cleanup: | ||
253 | |||
254 | clear_bit(0, &dev->dma_flag); | ||
255 | |||
256 | |||
257 | return retcode; | ||
258 | } | ||
259 | |||
260 | static void gamma_dma_timer_bh(unsigned long dev) | ||
261 | { | ||
262 | gamma_dma_schedule((drm_device_t *)dev, 0); | ||
263 | } | ||
264 | |||
265 | void gamma_irq_immediate_bh(void *dev) | ||
266 | { | ||
267 | gamma_dma_schedule(dev, 0); | ||
268 | } | ||
269 | |||
270 | int gamma_dma_schedule(drm_device_t *dev, int locked) | ||
271 | { | ||
272 | int next; | ||
273 | drm_queue_t *q; | ||
274 | drm_buf_t *buf; | ||
275 | int retcode = 0; | ||
276 | int processed = 0; | ||
277 | int missed; | ||
278 | int expire = 20; | ||
279 | drm_device_dma_t *dma = dev->dma; | ||
280 | |||
281 | if (test_and_set_bit(0, &dev->interrupt_flag)) { | ||
282 | /* Not reentrant */ | ||
283 | atomic_inc(&dev->counts[10]); /* _DRM_STAT_MISSED */ | ||
284 | return -EBUSY; | ||
285 | } | ||
286 | missed = atomic_read(&dev->counts[10]); | ||
287 | |||
288 | |||
289 | again: | ||
290 | if (dev->context_flag) { | ||
291 | clear_bit(0, &dev->interrupt_flag); | ||
292 | return -EBUSY; | ||
293 | } | ||
294 | if (dma->next_buffer) { | ||
295 | /* Unsent buffer that was previously | ||
296 | selected, but that couldn't be sent | ||
297 | because the lock could not be obtained | ||
298 | or the DMA engine wasn't ready. Try | ||
299 | again. */ | ||
300 | if (!(retcode = gamma_do_dma(dev, locked))) ++processed; | ||
301 | } else { | ||
302 | do { | ||
303 | next = gamma_select_queue(dev, gamma_dma_timer_bh); | ||
304 | if (next >= 0) { | ||
305 | q = dev->queuelist[next]; | ||
306 | buf = gamma_waitlist_get(&q->waitlist); | ||
307 | dma->next_buffer = buf; | ||
308 | dma->next_queue = q; | ||
309 | if (buf && buf->list == DRM_LIST_RECLAIM) { | ||
310 | gamma_clear_next_buffer(dev); | ||
311 | gamma_free_buffer(dev, buf); | ||
312 | } | ||
313 | } | ||
314 | } while (next >= 0 && !dma->next_buffer); | ||
315 | if (dma->next_buffer) { | ||
316 | if (!(retcode = gamma_do_dma(dev, locked))) { | ||
317 | ++processed; | ||
318 | } | ||
319 | } | ||
320 | } | ||
321 | |||
322 | if (--expire) { | ||
323 | if (missed != atomic_read(&dev->counts[10])) { | ||
324 | if (gamma_dma_is_ready(dev)) goto again; | ||
325 | } | ||
326 | if (processed && gamma_dma_is_ready(dev)) { | ||
327 | processed = 0; | ||
328 | goto again; | ||
329 | } | ||
330 | } | ||
331 | |||
332 | clear_bit(0, &dev->interrupt_flag); | ||
333 | |||
334 | return retcode; | ||
335 | } | ||
336 | |||
337 | static int gamma_dma_priority(struct file *filp, | ||
338 | drm_device_t *dev, drm_dma_t *d) | ||
339 | { | ||
340 | unsigned long address; | ||
341 | unsigned long length; | ||
342 | int must_free = 0; | ||
343 | int retcode = 0; | ||
344 | int i; | ||
345 | int idx; | ||
346 | drm_buf_t *buf; | ||
347 | drm_buf_t *last_buf = NULL; | ||
348 | drm_device_dma_t *dma = dev->dma; | ||
349 | int *send_indices = NULL; | ||
350 | int *send_sizes = NULL; | ||
351 | |||
352 | DECLARE_WAITQUEUE(entry, current); | ||
353 | |||
354 | /* Turn off interrupt handling */ | ||
355 | while (test_and_set_bit(0, &dev->interrupt_flag)) { | ||
356 | schedule(); | ||
357 | if (signal_pending(current)) return -EINTR; | ||
358 | } | ||
359 | if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) { | ||
360 | while (!gamma_lock_take(&dev->lock.hw_lock->lock, | ||
361 | DRM_KERNEL_CONTEXT)) { | ||
362 | schedule(); | ||
363 | if (signal_pending(current)) { | ||
364 | clear_bit(0, &dev->interrupt_flag); | ||
365 | return -EINTR; | ||
366 | } | ||
367 | } | ||
368 | ++must_free; | ||
369 | } | ||
370 | |||
371 | send_indices = DRM(alloc)(d->send_count * sizeof(*send_indices), | ||
372 | DRM_MEM_DRIVER); | ||
373 | if (send_indices == NULL) | ||
374 | return -ENOMEM; | ||
375 | if (copy_from_user(send_indices, d->send_indices, | ||
376 | d->send_count * sizeof(*send_indices))) { | ||
377 | retcode = -EFAULT; | ||
378 | goto cleanup; | ||
379 | } | ||
380 | |||
381 | send_sizes = DRM(alloc)(d->send_count * sizeof(*send_sizes), | ||
382 | DRM_MEM_DRIVER); | ||
383 | if (send_sizes == NULL) | ||
384 | return -ENOMEM; | ||
385 | if (copy_from_user(send_sizes, d->send_sizes, | ||
386 | d->send_count * sizeof(*send_sizes))) { | ||
387 | retcode = -EFAULT; | ||
388 | goto cleanup; | ||
389 | } | ||
390 | |||
391 | for (i = 0; i < d->send_count; i++) { | ||
392 | idx = send_indices[i]; | ||
393 | if (idx < 0 || idx >= dma->buf_count) { | ||
394 | DRM_ERROR("Index %d (of %d max)\n", | ||
395 | send_indices[i], dma->buf_count - 1); | ||
396 | continue; | ||
397 | } | ||
398 | buf = dma->buflist[ idx ]; | ||
399 | if (buf->filp != filp) { | ||
400 | DRM_ERROR("Process %d using buffer not owned\n", | ||
401 | current->pid); | ||
402 | retcode = -EINVAL; | ||
403 | goto cleanup; | ||
404 | } | ||
405 | if (buf->list != DRM_LIST_NONE) { | ||
406 | DRM_ERROR("Process %d using buffer on list %d\n", | ||
407 | current->pid, buf->list); | ||
408 | retcode = -EINVAL; | ||
409 | goto cleanup; | ||
410 | } | ||
411 | /* This isn't a race condition on | ||
412 | buf->list, since our concern is the | ||
413 | buffer reclaim during the time the | ||
414 | process closes the /dev/drm? handle, so | ||
415 | it can't also be doing DMA. */ | ||
416 | buf->list = DRM_LIST_PRIO; | ||
417 | buf->used = send_sizes[i]; | ||
418 | buf->context = d->context; | ||
419 | buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED; | ||
420 | address = (unsigned long)buf->address; | ||
421 | length = buf->used; | ||
422 | if (!length) { | ||
423 | DRM_ERROR("0 length buffer\n"); | ||
424 | } | ||
425 | if (buf->pending) { | ||
426 | DRM_ERROR("Sending pending buffer:" | ||
427 | " buffer %d, offset %d\n", | ||
428 | send_indices[i], i); | ||
429 | retcode = -EINVAL; | ||
430 | goto cleanup; | ||
431 | } | ||
432 | if (buf->waiting) { | ||
433 | DRM_ERROR("Sending waiting buffer:" | ||
434 | " buffer %d, offset %d\n", | ||
435 | send_indices[i], i); | ||
436 | retcode = -EINVAL; | ||
437 | goto cleanup; | ||
438 | } | ||
439 | buf->pending = 1; | ||
440 | |||
441 | if (dev->last_context != buf->context | ||
442 | && !(dev->queuelist[buf->context]->flags | ||
443 | & _DRM_CONTEXT_PRESERVED)) { | ||
444 | add_wait_queue(&dev->context_wait, &entry); | ||
445 | current->state = TASK_INTERRUPTIBLE; | ||
446 | /* PRE: dev->last_context != buf->context */ | ||
447 | DRM(context_switch)(dev, dev->last_context, | ||
448 | buf->context); | ||
449 | /* POST: we will wait for the context | ||
450 | switch and will dispatch on a later call | ||
451 | when dev->last_context == buf->context. | ||
452 | NOTE WE HOLD THE LOCK THROUGHOUT THIS | ||
453 | TIME! */ | ||
454 | schedule(); | ||
455 | current->state = TASK_RUNNING; | ||
456 | remove_wait_queue(&dev->context_wait, &entry); | ||
457 | if (signal_pending(current)) { | ||
458 | retcode = -EINTR; | ||
459 | goto cleanup; | ||
460 | } | ||
461 | if (dev->last_context != buf->context) { | ||
462 | DRM_ERROR("Context mismatch: %d %d\n", | ||
463 | dev->last_context, | ||
464 | buf->context); | ||
465 | } | ||
466 | } | ||
467 | |||
468 | gamma_dma_dispatch(dev, address, length); | ||
469 | atomic_inc(&dev->counts[9]); /* _DRM_STAT_SPECIAL */ | ||
470 | atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */ | ||
471 | |||
472 | if (last_buf) { | ||
473 | gamma_free_buffer(dev, last_buf); | ||
474 | } | ||
475 | last_buf = buf; | ||
476 | } | ||
477 | |||
478 | |||
479 | cleanup: | ||
480 | if (last_buf) { | ||
481 | gamma_dma_ready(dev); | ||
482 | gamma_free_buffer(dev, last_buf); | ||
483 | } | ||
484 | if (send_indices) | ||
485 | DRM(free)(send_indices, d->send_count * sizeof(*send_indices), | ||
486 | DRM_MEM_DRIVER); | ||
487 | if (send_sizes) | ||
488 | DRM(free)(send_sizes, d->send_count * sizeof(*send_sizes), | ||
489 | DRM_MEM_DRIVER); | ||
490 | |||
491 | if (must_free && !dev->context_flag) { | ||
492 | if (gamma_lock_free(dev, &dev->lock.hw_lock->lock, | ||
493 | DRM_KERNEL_CONTEXT)) { | ||
494 | DRM_ERROR("\n"); | ||
495 | } | ||
496 | } | ||
497 | clear_bit(0, &dev->interrupt_flag); | ||
498 | return retcode; | ||
499 | } | ||
500 | |||
501 | static int gamma_dma_send_buffers(struct file *filp, | ||
502 | drm_device_t *dev, drm_dma_t *d) | ||
503 | { | ||
504 | DECLARE_WAITQUEUE(entry, current); | ||
505 | drm_buf_t *last_buf = NULL; | ||
506 | int retcode = 0; | ||
507 | drm_device_dma_t *dma = dev->dma; | ||
508 | int send_index; | ||
509 | |||
510 | if (get_user(send_index, &d->send_indices[d->send_count-1])) | ||
511 | return -EFAULT; | ||
512 | |||
513 | if (d->flags & _DRM_DMA_BLOCK) { | ||
514 | last_buf = dma->buflist[send_index]; | ||
515 | add_wait_queue(&last_buf->dma_wait, &entry); | ||
516 | } | ||
517 | |||
518 | if ((retcode = gamma_dma_enqueue(filp, d))) { | ||
519 | if (d->flags & _DRM_DMA_BLOCK) | ||
520 | remove_wait_queue(&last_buf->dma_wait, &entry); | ||
521 | return retcode; | ||
522 | } | ||
523 | |||
524 | gamma_dma_schedule(dev, 0); | ||
525 | |||
526 | if (d->flags & _DRM_DMA_BLOCK) { | ||
527 | DRM_DEBUG("%d waiting\n", current->pid); | ||
528 | for (;;) { | ||
529 | current->state = TASK_INTERRUPTIBLE; | ||
530 | if (!last_buf->waiting && !last_buf->pending) | ||
531 | break; /* finished */ | ||
532 | schedule(); | ||
533 | if (signal_pending(current)) { | ||
534 | retcode = -EINTR; /* Can't restart */ | ||
535 | break; | ||
536 | } | ||
537 | } | ||
538 | current->state = TASK_RUNNING; | ||
539 | DRM_DEBUG("%d running\n", current->pid); | ||
540 | remove_wait_queue(&last_buf->dma_wait, &entry); | ||
541 | if (!retcode | ||
542 | || (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) { | ||
543 | if (!waitqueue_active(&last_buf->dma_wait)) { | ||
544 | gamma_free_buffer(dev, last_buf); | ||
545 | } | ||
546 | } | ||
547 | if (retcode) { | ||
548 | DRM_ERROR("ctx%d w%d p%d c%ld i%d l%d pid:%d\n", | ||
549 | d->context, | ||
550 | last_buf->waiting, | ||
551 | last_buf->pending, | ||
552 | (long)DRM_WAITCOUNT(dev, d->context), | ||
553 | last_buf->idx, | ||
554 | last_buf->list, | ||
555 | current->pid); | ||
556 | } | ||
557 | } | ||
558 | return retcode; | ||
559 | } | ||
560 | |||
561 | int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd, | ||
562 | unsigned long arg) | ||
563 | { | ||
564 | drm_file_t *priv = filp->private_data; | ||
565 | drm_device_t *dev = priv->dev; | ||
566 | drm_device_dma_t *dma = dev->dma; | ||
567 | int retcode = 0; | ||
568 | drm_dma_t __user *argp = (void __user *)arg; | ||
569 | drm_dma_t d; | ||
570 | |||
571 | if (copy_from_user(&d, argp, sizeof(d))) | ||
572 | return -EFAULT; | ||
573 | |||
574 | if (d.send_count < 0 || d.send_count > dma->buf_count) { | ||
575 | DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n", | ||
576 | current->pid, d.send_count, dma->buf_count); | ||
577 | return -EINVAL; | ||
578 | } | ||
579 | |||
580 | if (d.request_count < 0 || d.request_count > dma->buf_count) { | ||
581 | DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", | ||
582 | current->pid, d.request_count, dma->buf_count); | ||
583 | return -EINVAL; | ||
584 | } | ||
585 | |||
586 | if (d.send_count) { | ||
587 | if (d.flags & _DRM_DMA_PRIORITY) | ||
588 | retcode = gamma_dma_priority(filp, dev, &d); | ||
589 | else | ||
590 | retcode = gamma_dma_send_buffers(filp, dev, &d); | ||
591 | } | ||
592 | |||
593 | d.granted_count = 0; | ||
594 | |||
595 | if (!retcode && d.request_count) { | ||
596 | retcode = gamma_dma_get_buffers(filp, &d); | ||
597 | } | ||
598 | |||
599 | DRM_DEBUG("%d returning, granted = %d\n", | ||
600 | current->pid, d.granted_count); | ||
601 | if (copy_to_user(argp, &d, sizeof(d))) | ||
602 | return -EFAULT; | ||
603 | |||
604 | return retcode; | ||
605 | } | ||
606 | |||
607 | /* ============================================================= | ||
608 | * DMA initialization, cleanup | ||
609 | */ | ||
610 | |||
611 | static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init ) | ||
612 | { | ||
613 | drm_gamma_private_t *dev_priv; | ||
614 | drm_device_dma_t *dma = dev->dma; | ||
615 | drm_buf_t *buf; | ||
616 | int i; | ||
617 | struct list_head *list; | ||
618 | unsigned long *pgt; | ||
619 | |||
620 | DRM_DEBUG( "%s\n", __FUNCTION__ ); | ||
621 | |||
622 | dev_priv = DRM(alloc)( sizeof(drm_gamma_private_t), | ||
623 | DRM_MEM_DRIVER ); | ||
624 | if ( !dev_priv ) | ||
625 | return -ENOMEM; | ||
626 | |||
627 | dev->dev_private = (void *)dev_priv; | ||
628 | |||
629 | memset( dev_priv, 0, sizeof(drm_gamma_private_t) ); | ||
630 | |||
631 | dev_priv->num_rast = init->num_rast; | ||
632 | |||
633 | list_for_each(list, &dev->maplist->head) { | ||
634 | drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head); | ||
635 | if( r_list->map && | ||
636 | r_list->map->type == _DRM_SHM && | ||
637 | r_list->map->flags & _DRM_CONTAINS_LOCK ) { | ||
638 | dev_priv->sarea = r_list->map; | ||
639 | break; | ||
640 | } | ||
641 | } | ||
642 | |||
643 | dev_priv->mmio0 = drm_core_findmap(dev, init->mmio0); | ||
644 | dev_priv->mmio1 = drm_core_findmap(dev, init->mmio1); | ||
645 | dev_priv->mmio2 = drm_core_findmap(dev, init->mmio2); | ||
646 | dev_priv->mmio3 = drm_core_findmap(dev, init->mmio3); | ||
647 | |||
648 | dev_priv->sarea_priv = (drm_gamma_sarea_t *) | ||
649 | ((u8 *)dev_priv->sarea->handle + | ||
650 | init->sarea_priv_offset); | ||
651 | |||
652 | if (init->pcimode) { | ||
653 | buf = dma->buflist[GLINT_DRI_BUF_COUNT]; | ||
654 | pgt = buf->address; | ||
655 | |||
656 | for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) { | ||
657 | buf = dma->buflist[i]; | ||
658 | *pgt = virt_to_phys((void*)buf->address) | 0x07; | ||
659 | pgt++; | ||
660 | } | ||
661 | |||
662 | buf = dma->buflist[GLINT_DRI_BUF_COUNT]; | ||
663 | } else { | ||
664 | dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); | ||
665 | drm_core_ioremap( dev->agp_buffer_map, dev); | ||
666 | |||
667 | buf = dma->buflist[GLINT_DRI_BUF_COUNT]; | ||
668 | pgt = buf->address; | ||
669 | |||
670 | for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) { | ||
671 | buf = dma->buflist[i]; | ||
672 | *pgt = (unsigned long)buf->address + 0x07; | ||
673 | pgt++; | ||
674 | } | ||
675 | |||
676 | buf = dma->buflist[GLINT_DRI_BUF_COUNT]; | ||
677 | |||
678 | while (GAMMA_READ(GAMMA_INFIFOSPACE) < 1); | ||
679 | GAMMA_WRITE( GAMMA_GDMACONTROL, 0xe); | ||
680 | } | ||
681 | while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2); | ||
682 | GAMMA_WRITE( GAMMA_PAGETABLEADDR, virt_to_phys((void*)buf->address) ); | ||
683 | GAMMA_WRITE( GAMMA_PAGETABLELENGTH, 2 ); | ||
684 | |||
685 | return 0; | ||
686 | } | ||
687 | |||
688 | int gamma_do_cleanup_dma( drm_device_t *dev ) | ||
689 | { | ||
690 | DRM_DEBUG( "%s\n", __FUNCTION__ ); | ||
691 | |||
692 | /* Make sure interrupts are disabled here because the uninstall ioctl | ||
693 | * may not have been called from userspace and after dev_private | ||
694 | * is freed, it's too late. | ||
695 | */ | ||
696 | if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) | ||
697 | if ( dev->irq_enabled ) | ||
698 | DRM(irq_uninstall)(dev); | ||
699 | |||
700 | if ( dev->dev_private ) { | ||
701 | |||
702 | if ( dev->agp_buffer_map != NULL ) | ||
703 | drm_core_ioremapfree( dev->agp_buffer_map, dev ); | ||
704 | |||
705 | DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t), | ||
706 | DRM_MEM_DRIVER ); | ||
707 | dev->dev_private = NULL; | ||
708 | } | ||
709 | |||
710 | return 0; | ||
711 | } | ||
712 | |||
713 | int gamma_dma_init( struct inode *inode, struct file *filp, | ||
714 | unsigned int cmd, unsigned long arg ) | ||
715 | { | ||
716 | drm_file_t *priv = filp->private_data; | ||
717 | drm_device_t *dev = priv->dev; | ||
718 | drm_gamma_init_t init; | ||
719 | |||
720 | LOCK_TEST_WITH_RETURN( dev, filp ); | ||
721 | |||
722 | if ( copy_from_user( &init, (drm_gamma_init_t __user *)arg, sizeof(init) ) ) | ||
723 | return -EFAULT; | ||
724 | |||
725 | switch ( init.func ) { | ||
726 | case GAMMA_INIT_DMA: | ||
727 | return gamma_do_init_dma( dev, &init ); | ||
728 | case GAMMA_CLEANUP_DMA: | ||
729 | return gamma_do_cleanup_dma( dev ); | ||
730 | } | ||
731 | |||
732 | return -EINVAL; | ||
733 | } | ||
734 | |||
735 | static int gamma_do_copy_dma( drm_device_t *dev, drm_gamma_copy_t *copy ) | ||
736 | { | ||
737 | drm_device_dma_t *dma = dev->dma; | ||
738 | unsigned int *screenbuf; | ||
739 | |||
740 | DRM_DEBUG( "%s\n", __FUNCTION__ ); | ||
741 | |||
742 | /* We've DRM_RESTRICTED this DMA buffer */ | ||
743 | |||
744 | screenbuf = dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ]->address; | ||
745 | |||
746 | #if 0 | ||
747 | *buffer++ = 0x180; /* Tag (FilterMode) */ | ||
748 | *buffer++ = 0x200; /* Allow FBColor through */ | ||
749 | *buffer++ = 0x53B; /* Tag */ | ||
750 | *buffer++ = copy->Pitch; | ||
751 | *buffer++ = 0x53A; /* Tag */ | ||
752 | *buffer++ = copy->SrcAddress; | ||
753 | *buffer++ = 0x539; /* Tag */ | ||
754 | *buffer++ = copy->WidthHeight; /* Initiates transfer */ | ||
755 | *buffer++ = 0x53C; /* Tag - DMAOutputAddress */ | ||
756 | *buffer++ = virt_to_phys((void*)screenbuf); | ||
757 | *buffer++ = 0x53D; /* Tag - DMAOutputCount */ | ||
758 | *buffer++ = copy->Count; /* Reads HostOutFifo BLOCKS until ..*/ | ||
759 | |||
760 | /* Data now sitting in dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ] */ | ||
761 | /* Now put it back to the screen */ | ||
762 | |||
763 | *buffer++ = 0x180; /* Tag (FilterMode) */ | ||
764 | *buffer++ = 0x400; /* Allow Sync through */ | ||
765 | *buffer++ = 0x538; /* Tag - DMARectangleReadTarget */ | ||
766 | *buffer++ = 0x155; /* FBSourceData | count */ | ||
767 | *buffer++ = 0x537; /* Tag */ | ||
768 | *buffer++ = copy->Pitch; | ||
769 | *buffer++ = 0x536; /* Tag */ | ||
770 | *buffer++ = copy->DstAddress; | ||
771 | *buffer++ = 0x535; /* Tag */ | ||
772 | *buffer++ = copy->WidthHeight; /* Initiates transfer */ | ||
773 | *buffer++ = 0x530; /* Tag - DMAAddr */ | ||
774 | *buffer++ = virt_to_phys((void*)screenbuf); | ||
775 | *buffer++ = 0x531; | ||
776 | *buffer++ = copy->Count; /* initiates DMA transfer of color data */ | ||
777 | #endif | ||
778 | |||
779 | /* need to dispatch it now */ | ||
780 | |||
781 | return 0; | ||
782 | } | ||
783 | |||
784 | int gamma_dma_copy( struct inode *inode, struct file *filp, | ||
785 | unsigned int cmd, unsigned long arg ) | ||
786 | { | ||
787 | drm_file_t *priv = filp->private_data; | ||
788 | drm_device_t *dev = priv->dev; | ||
789 | drm_gamma_copy_t copy; | ||
790 | |||
791 | if ( copy_from_user( ©, (drm_gamma_copy_t __user *)arg, sizeof(copy) ) ) | ||
792 | return -EFAULT; | ||
793 | |||
794 | return gamma_do_copy_dma( dev, © ); | ||
795 | } | ||
796 | |||
797 | /* ============================================================= | ||
798 | * Per Context SAREA Support | ||
799 | */ | ||
800 | |||
801 | int gamma_getsareactx(struct inode *inode, struct file *filp, | ||
802 | unsigned int cmd, unsigned long arg) | ||
803 | { | ||
804 | drm_file_t *priv = filp->private_data; | ||
805 | drm_device_t *dev = priv->dev; | ||
806 | drm_ctx_priv_map_t __user *argp = (void __user *)arg; | ||
807 | drm_ctx_priv_map_t request; | ||
808 | drm_map_t *map; | ||
809 | |||
810 | if (copy_from_user(&request, argp, sizeof(request))) | ||
811 | return -EFAULT; | ||
812 | |||
813 | down(&dev->struct_sem); | ||
814 | if ((int)request.ctx_id >= dev->max_context) { | ||
815 | up(&dev->struct_sem); | ||
816 | return -EINVAL; | ||
817 | } | ||
818 | |||
819 | map = dev->context_sareas[request.ctx_id]; | ||
820 | up(&dev->struct_sem); | ||
821 | |||
822 | request.handle = map->handle; | ||
823 | if (copy_to_user(argp, &request, sizeof(request))) | ||
824 | return -EFAULT; | ||
825 | return 0; | ||
826 | } | ||
827 | |||
828 | int gamma_setsareactx(struct inode *inode, struct file *filp, | ||
829 | unsigned int cmd, unsigned long arg) | ||
830 | { | ||
831 | drm_file_t *priv = filp->private_data; | ||
832 | drm_device_t *dev = priv->dev; | ||
833 | drm_ctx_priv_map_t request; | ||
834 | drm_map_t *map = NULL; | ||
835 | drm_map_list_t *r_list; | ||
836 | struct list_head *list; | ||
837 | |||
838 | if (copy_from_user(&request, | ||
839 | (drm_ctx_priv_map_t __user *)arg, | ||
840 | sizeof(request))) | ||
841 | return -EFAULT; | ||
842 | |||
843 | down(&dev->struct_sem); | ||
844 | r_list = NULL; | ||
845 | list_for_each(list, &dev->maplist->head) { | ||
846 | r_list = list_entry(list, drm_map_list_t, head); | ||
847 | if(r_list->map && | ||
848 | r_list->map->handle == request.handle) break; | ||
849 | } | ||
850 | if (list == &(dev->maplist->head)) { | ||
851 | up(&dev->struct_sem); | ||
852 | return -EINVAL; | ||
853 | } | ||
854 | map = r_list->map; | ||
855 | up(&dev->struct_sem); | ||
856 | |||
857 | if (!map) return -EINVAL; | ||
858 | |||
859 | down(&dev->struct_sem); | ||
860 | if ((int)request.ctx_id >= dev->max_context) { | ||
861 | up(&dev->struct_sem); | ||
862 | return -EINVAL; | ||
863 | } | ||
864 | dev->context_sareas[request.ctx_id] = map; | ||
865 | up(&dev->struct_sem); | ||
866 | return 0; | ||
867 | } | ||
868 | |||
869 | void gamma_driver_irq_preinstall( drm_device_t *dev ) { | ||
870 | drm_gamma_private_t *dev_priv = | ||
871 | (drm_gamma_private_t *)dev->dev_private; | ||
872 | |||
873 | while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2) | ||
874 | cpu_relax(); | ||
875 | |||
876 | GAMMA_WRITE( GAMMA_GCOMMANDMODE, 0x00000004 ); | ||
877 | GAMMA_WRITE( GAMMA_GDMACONTROL, 0x00000000 ); | ||
878 | } | ||
879 | |||
880 | void gamma_driver_irq_postinstall( drm_device_t *dev ) { | ||
881 | drm_gamma_private_t *dev_priv = | ||
882 | (drm_gamma_private_t *)dev->dev_private; | ||
883 | |||
884 | while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3) | ||
885 | cpu_relax(); | ||
886 | |||
887 | GAMMA_WRITE( GAMMA_GINTENABLE, 0x00002001 ); | ||
888 | GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000008 ); | ||
889 | GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00039090 ); | ||
890 | } | ||
891 | |||
892 | void gamma_driver_irq_uninstall( drm_device_t *dev ) { | ||
893 | drm_gamma_private_t *dev_priv = | ||
894 | (drm_gamma_private_t *)dev->dev_private; | ||
895 | if (!dev_priv) | ||
896 | return; | ||
897 | |||
898 | while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3) | ||
899 | cpu_relax(); | ||
900 | |||
901 | GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00000000 ); | ||
902 | GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000000 ); | ||
903 | GAMMA_WRITE( GAMMA_GINTENABLE, 0x00000000 ); | ||
904 | } | ||
905 | |||
906 | extern drm_ioctl_desc_t DRM(ioctls)[]; | ||
907 | |||
908 | static int gamma_driver_preinit(drm_device_t *dev) | ||
909 | { | ||
910 | /* reset the finish ioctl */ | ||
911 | DRM(ioctls)[DRM_IOCTL_NR(DRM_IOCTL_FINISH)].func = DRM(finish); | ||
912 | return 0; | ||
913 | } | ||
914 | |||
915 | static void gamma_driver_pretakedown(drm_device_t *dev) | ||
916 | { | ||
917 | gamma_do_cleanup_dma(dev); | ||
918 | } | ||
919 | |||
920 | static void gamma_driver_dma_ready(drm_device_t *dev) | ||
921 | { | ||
922 | gamma_dma_ready(dev); | ||
923 | } | ||
924 | |||
925 | static int gamma_driver_dma_quiescent(drm_device_t *dev) | ||
926 | { | ||
927 | drm_gamma_private_t *dev_priv = ( | ||
928 | drm_gamma_private_t *)dev->dev_private; | ||
929 | if (dev_priv->num_rast == 2) | ||
930 | gamma_dma_quiescent_dual(dev); | ||
931 | else gamma_dma_quiescent_single(dev); | ||
932 | return 0; | ||
933 | } | ||
934 | |||
935 | void gamma_driver_register_fns(drm_device_t *dev) | ||
936 | { | ||
937 | dev->driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ; | ||
938 | DRM(fops).read = gamma_fops_read; | ||
939 | DRM(fops).poll = gamma_fops_poll; | ||
940 | dev->driver.preinit = gamma_driver_preinit; | ||
941 | dev->driver.pretakedown = gamma_driver_pretakedown; | ||
942 | dev->driver.dma_ready = gamma_driver_dma_ready; | ||
943 | dev->driver.dma_quiescent = gamma_driver_dma_quiescent; | ||
944 | dev->driver.dma_flush_block_and_flush = gamma_flush_block_and_flush; | ||
945 | dev->driver.dma_flush_unblock = gamma_flush_unblock; | ||
946 | } | ||
diff --git a/drivers/char/drm/gamma_drm.h b/drivers/char/drm/gamma_drm.h
deleted file mode 100644
index 20819ded0e15..000000000000
--- a/drivers/char/drm/gamma_drm.h
+++ /dev/null
@@ -1,90 +0,0 @@
1 | #ifndef _GAMMA_DRM_H_ | ||
2 | #define _GAMMA_DRM_H_ | ||
3 | |||
4 | typedef struct _drm_gamma_tex_region { | ||
5 | unsigned char next, prev; /* indices to form a circular LRU */ | ||
6 | unsigned char in_use; /* owned by a client, or free? */ | ||
7 | int age; /* tracked by clients to update local LRU's */ | ||
8 | } drm_gamma_tex_region_t; | ||
9 | |||
10 | typedef struct { | ||
11 | unsigned int GDeltaMode; | ||
12 | unsigned int GDepthMode; | ||
13 | unsigned int GGeometryMode; | ||
14 | unsigned int GTransformMode; | ||
15 | } drm_gamma_context_regs_t; | ||
16 | |||
17 | typedef struct _drm_gamma_sarea { | ||
18 | drm_gamma_context_regs_t context_state; | ||
19 | |||
20 | unsigned int dirty; | ||
21 | |||
22 | |||
23 | /* Maintain an LRU of contiguous regions of texture space. If | ||
24 | * you think you own a region of texture memory, and it has an | ||
25 | * age different to the one you set, then you are mistaken and | ||
26 | * it has been stolen by another client. If global texAge | ||
27 | * hasn't changed, there is no need to walk the list. | ||
28 | * | ||
29 | * These regions can be used as a proxy for the fine-grained | ||
30 | * texture information of other clients - by maintaining them | ||
31 | * in the same lru which is used to age their own textures, | ||
32 | * clients have an approximate lru for the whole of global | ||
33 | * texture space, and can make informed decisions as to which | ||
34 | * areas to kick out. There is no need to choose whether to | ||
35 | * kick out your own texture or someone else's - simply eject | ||
36 | * them all in LRU order. | ||
37 | */ | ||
38 | |||
39 | #define GAMMA_NR_TEX_REGIONS 64 | ||
40 | drm_gamma_tex_region_t texList[GAMMA_NR_TEX_REGIONS+1]; | ||
41 | /* Last elt is a sentinel */ | ||
42 | int texAge; /* last time texture was uploaded */ | ||
43 | int last_enqueue; /* last time a buffer was enqueued */ | ||
44 | int last_dispatch; /* age of the most recently dispatched buffer */ | ||
45 | int last_quiescent; /* */ | ||
46 | int ctxOwner; /* last context to upload state */ | ||
47 | |||
48 | int vertex_prim; | ||
49 | } drm_gamma_sarea_t; | ||
50 | |||
51 | /* WARNING: If you change any of these defines, make sure to change the | ||
52 | * defines in the Xserver file (xf86drmGamma.h) | ||
53 | */ | ||
54 | |||
55 | /* Gamma specific ioctls | ||
56 | * The device specific ioctl range is 0x40 to 0x79. | ||
57 | */ | ||
58 | #define DRM_IOCTL_GAMMA_INIT DRM_IOW( 0x40, drm_gamma_init_t) | ||
59 | #define DRM_IOCTL_GAMMA_COPY DRM_IOW( 0x41, drm_gamma_copy_t) | ||
60 | |||
61 | typedef struct drm_gamma_copy { | ||
62 | unsigned int DMAOutputAddress; | ||
63 | unsigned int DMAOutputCount; | ||
64 | unsigned int DMAReadGLINTSource; | ||
65 | unsigned int DMARectangleWriteAddress; | ||
66 | unsigned int DMARectangleWriteLinePitch; | ||
67 | unsigned int DMARectangleWrite; | ||
68 | unsigned int DMARectangleReadAddress; | ||
69 | unsigned int DMARectangleReadLinePitch; | ||
70 | unsigned int DMARectangleRead; | ||
71 | unsigned int DMARectangleReadTarget; | ||
72 | } drm_gamma_copy_t; | ||
73 | |||
74 | typedef struct drm_gamma_init { | ||
75 | enum { | ||
76 | GAMMA_INIT_DMA = 0x01, | ||
77 | GAMMA_CLEANUP_DMA = 0x02 | ||
78 | } func; | ||
79 | |||
80 | int sarea_priv_offset; | ||
81 | int pcimode; | ||
82 | unsigned int mmio0; | ||
83 | unsigned int mmio1; | ||
84 | unsigned int mmio2; | ||
85 | unsigned int mmio3; | ||
86 | unsigned int buffers_offset; | ||
87 | int num_rast; | ||
88 | } drm_gamma_init_t; | ||
89 | |||
90 | #endif /* _GAMMA_DRM_H_ */ | ||
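
Editor's note: the long comment in drm_gamma_sarea above describes the shared texture-space LRU: a client stamps a region with a fresh global age when it claims it, and a later age mismatch means another client has stolen the region, so everything can simply be evicted in LRU order. A minimal userspace sketch of that ownership check follows; the struct and function names are hypothetical, only the age-stamp idea comes from the comment:

	/* Hypothetical model of the shared texture-region ownership check. */
	#include <stdio.h>

	#define NR_REGIONS 8

	struct tex_region {
		int in_use; /* owned by some client? */
		int age;    /* global age stamp at the time it was last claimed */
	};

	struct shared_state {
		struct tex_region list[NR_REGIONS];
		int tex_age;  /* global counter, bumped on every claim */
	};

	/* A client claims a region: stamp it with a fresh global age. */
	static int claim_region(struct shared_state *s, int idx)
	{
		s->list[idx].in_use = 1;
		s->list[idx].age = ++s->tex_age;
		return s->list[idx].age;  /* the client caches this stamp locally */
	}

	/* Later the client checks whether it still owns the region: if the
	 * age changed, another client stole it and the local copy is stale. */
	static int still_mine(const struct shared_state *s, int idx, int my_stamp)
	{
		return s->list[idx].age == my_stamp;
	}

	int main(void)
	{
		struct shared_state s = { 0 };
		int stamp = claim_region(&s, 3);                  /* client A claims region 3 */

		printf("owned: %d\n", still_mine(&s, 3, stamp));  /* 1 */
		claim_region(&s, 3);                              /* client B steals region 3 */
		printf("owned: %d\n", still_mine(&s, 3, stamp));  /* 0: stolen */
		return 0;
	}
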
diff --git a/drivers/char/drm/gamma_drv.c b/drivers/char/drm/gamma_drv.c deleted file mode 100644 index e7e64b62792a..000000000000 --- a/drivers/char/drm/gamma_drv.c +++ /dev/null | |||
@@ -1,59 +0,0 @@ | |||
1 | /* gamma.c -- 3dlabs GMX 2000 driver -*- linux-c -*- | ||
2 | * Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com | ||
3 | * | ||
4 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. | ||
5 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | ||
6 | * All Rights Reserved. | ||
7 | * | ||
8 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
25 | * DEALINGS IN THE SOFTWARE. | ||
26 | * | ||
27 | * Authors: | ||
28 | * Rickard E. (Rik) Faith <faith@valinux.com> | ||
29 | * Gareth Hughes <gareth@valinux.com> | ||
30 | */ | ||
31 | |||
32 | #include <linux/config.h> | ||
33 | #include "gamma.h" | ||
34 | #include "drmP.h" | ||
35 | #include "drm.h" | ||
36 | #include "gamma_drm.h" | ||
37 | #include "gamma_drv.h" | ||
38 | |||
39 | #include "drm_auth.h" | ||
40 | #include "drm_agpsupport.h" | ||
41 | #include "drm_bufs.h" | ||
42 | #include "gamma_context.h" /* NOTE! */ | ||
43 | #include "drm_dma.h" | ||
44 | #include "gamma_old_dma.h" /* NOTE */ | ||
45 | #include "drm_drawable.h" | ||
46 | #include "drm_drv.h" | ||
47 | |||
48 | #include "drm_fops.h" | ||
49 | #include "drm_init.h" | ||
50 | #include "drm_ioctl.h" | ||
51 | #include "drm_irq.h" | ||
52 | #include "gamma_lists.h" /* NOTE */ | ||
53 | #include "drm_lock.h" | ||
54 | #include "gamma_lock.h" /* NOTE */ | ||
55 | #include "drm_memory.h" | ||
56 | #include "drm_proc.h" | ||
57 | #include "drm_vm.h" | ||
58 | #include "drm_stub.h" | ||
59 | #include "drm_scatter.h" | ||
diff --git a/drivers/char/drm/gamma_drv.h b/drivers/char/drm/gamma_drv.h deleted file mode 100644 index 146fcc6253cd..000000000000 --- a/drivers/char/drm/gamma_drv.h +++ /dev/null | |||
@@ -1,147 +0,0 @@ | |||
1 | /* gamma_drv.h -- Private header for 3dlabs GMX 2000 driver -*- linux-c -*- | ||
2 | * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com | ||
3 | * | ||
4 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. | ||
5 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | ||
6 | * All rights reserved. | ||
7 | * | ||
8 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
25 | * DEALINGS IN THE SOFTWARE. | ||
26 | * | ||
27 | * Authors: | ||
28 | * Rickard E. (Rik) Faith <faith@valinux.com> | ||
29 | * | ||
30 | */ | ||
31 | |||
32 | #ifndef _GAMMA_DRV_H_ | ||
33 | #define _GAMMA_DRV_H_ | ||
34 | |||
35 | typedef struct drm_gamma_private { | ||
36 | drm_gamma_sarea_t *sarea_priv; | ||
37 | drm_map_t *sarea; | ||
38 | drm_map_t *mmio0; | ||
39 | drm_map_t *mmio1; | ||
40 | drm_map_t *mmio2; | ||
41 | drm_map_t *mmio3; | ||
42 | int num_rast; | ||
43 | } drm_gamma_private_t; | ||
44 | |||
45 | /* gamma_dma.c */ | ||
46 | extern int gamma_dma_init( struct inode *inode, struct file *filp, | ||
47 | unsigned int cmd, unsigned long arg ); | ||
48 | extern int gamma_dma_copy( struct inode *inode, struct file *filp, | ||
49 | unsigned int cmd, unsigned long arg ); | ||
50 | |||
51 | extern int gamma_do_cleanup_dma( drm_device_t *dev ); | ||
52 | extern void gamma_dma_ready(drm_device_t *dev); | ||
53 | extern void gamma_dma_quiescent_single(drm_device_t *dev); | ||
54 | extern void gamma_dma_quiescent_dual(drm_device_t *dev); | ||
55 | |||
56 | /* gamma_dma.c */ | ||
57 | extern int gamma_dma_schedule(drm_device_t *dev, int locked); | ||
58 | extern int gamma_dma(struct inode *inode, struct file *filp, | ||
59 | unsigned int cmd, unsigned long arg); | ||
60 | extern int gamma_find_devices(void); | ||
61 | extern int gamma_found(void); | ||
62 | |||
63 | /* Gamma-specific code pulled from drm_fops.h: | ||
64 | */ | ||
65 | extern int DRM(finish)(struct inode *inode, struct file *filp, | ||
66 | unsigned int cmd, unsigned long arg); | ||
67 | extern int DRM(flush_unblock)(drm_device_t *dev, int context, | ||
68 | drm_lock_flags_t flags); | ||
69 | extern int DRM(flush_block_and_flush)(drm_device_t *dev, int context, | ||
70 | drm_lock_flags_t flags); | ||
71 | |||
72 | /* Gamma-specific code pulled from drm_dma.h: | ||
73 | */ | ||
74 | extern void DRM(clear_next_buffer)(drm_device_t *dev); | ||
75 | extern int DRM(select_queue)(drm_device_t *dev, | ||
76 | void (*wrapper)(unsigned long)); | ||
77 | extern int DRM(dma_enqueue)(struct file *filp, drm_dma_t *dma); | ||
78 | extern int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma); | ||
79 | |||
80 | |||
81 | /* Gamma-specific code pulled from drm_lists.h (now renamed gamma_lists.h): | ||
82 | */ | ||
83 | extern int DRM(waitlist_create)(drm_waitlist_t *bl, int count); | ||
84 | extern int DRM(waitlist_destroy)(drm_waitlist_t *bl); | ||
85 | extern int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf); | ||
86 | extern drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl); | ||
87 | extern int DRM(freelist_create)(drm_freelist_t *bl, int count); | ||
88 | extern int DRM(freelist_destroy)(drm_freelist_t *bl); | ||
89 | extern int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl, | ||
90 | drm_buf_t *buf); | ||
91 | extern drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block); | ||
92 | |||
93 | /* externs for gamma changes to the ops */ | ||
94 | extern struct file_operations DRM(fops); | ||
95 | extern unsigned int gamma_fops_poll(struct file *filp, struct poll_table_struct *wait); | ||
96 | extern ssize_t gamma_fops_read(struct file *filp, char __user *buf, size_t count, loff_t *off); | ||
97 | |||
98 | |||
99 | #define GLINT_DRI_BUF_COUNT 256 | ||
100 | |||
101 | #define GAMMA_OFF(reg) \ | ||
102 | ((reg < 0x1000) \ | ||
103 | ? reg \ | ||
104 | : ((reg < 0x10000) \ | ||
105 | ? (reg - 0x1000) \ | ||
106 | : ((reg < 0x11000) \ | ||
107 | ? (reg - 0x10000) \ | ||
108 | : (reg - 0x11000)))) | ||
109 | |||
110 | #define GAMMA_BASE(reg) ((unsigned long) \ | ||
111 | ((reg < 0x1000) ? dev_priv->mmio0->handle : \ | ||
112 | ((reg < 0x10000) ? dev_priv->mmio1->handle : \ | ||
113 | ((reg < 0x11000) ? dev_priv->mmio2->handle : \ | ||
114 | dev_priv->mmio3->handle)))) | ||
115 | #define GAMMA_ADDR(reg) (GAMMA_BASE(reg) + GAMMA_OFF(reg)) | ||
116 | #define GAMMA_DEREF(reg) *(__volatile__ int *)GAMMA_ADDR(reg) | ||
117 | #define GAMMA_READ(reg) GAMMA_DEREF(reg) | ||
118 | #define GAMMA_WRITE(reg,val) do { GAMMA_DEREF(reg) = val; } while (0) | ||
119 | |||
120 | #define GAMMA_BROADCASTMASK 0x9378 | ||
121 | #define GAMMA_COMMANDINTENABLE 0x0c48 | ||
122 | #define GAMMA_DMAADDRESS 0x0028 | ||
123 | #define GAMMA_DMACOUNT 0x0030 | ||
124 | #define GAMMA_FILTERMODE 0x8c00 | ||
125 | #define GAMMA_GCOMMANDINTFLAGS 0x0c50 | ||
126 | #define GAMMA_GCOMMANDMODE 0x0c40 | ||
127 | #define GAMMA_QUEUED_DMA_MODE (1<<1) | ||
128 | #define GAMMA_GCOMMANDSTATUS 0x0c60 | ||
129 | #define GAMMA_GDELAYTIMER 0x0c38 | ||
130 | #define GAMMA_GDMACONTROL 0x0060 | ||
131 | #define GAMMA_USE_AGP (1<<1) | ||
132 | #define GAMMA_GINTENABLE 0x0808 | ||
133 | #define GAMMA_GINTFLAGS 0x0810 | ||
134 | #define GAMMA_INFIFOSPACE 0x0018 | ||
135 | #define GAMMA_OUTFIFOWORDS 0x0020 | ||
136 | #define GAMMA_OUTPUTFIFO 0x2000 | ||
137 | #define GAMMA_SYNC 0x8c40 | ||
138 | #define GAMMA_SYNC_TAG 0x0188 | ||
139 | #define GAMMA_PAGETABLEADDR 0x0C00 | ||
140 | #define GAMMA_PAGETABLELENGTH 0x0C08 | ||
141 | |||
142 | #define GAMMA_PASSTHROUGH 0x1FE | ||
143 | #define GAMMA_DMAADDRTAG 0x530 | ||
144 | #define GAMMA_DMACOUNTTAG 0x531 | ||
145 | #define GAMMA_COMMANDINTTAG 0x532 | ||
146 | |||
147 | #endif | ||
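
Editor's note: GAMMA_OFF() and GAMMA_BASE() above route a flat register offset into one of four mmio maps: offsets below 0x1000 hit mmio0, 0x1000-0xffff hit mmio1 (rebased by 0x1000), 0x10000-0x10fff hit mmio2 (rebased by 0x10000), and anything higher hits mmio3 (rebased by 0x11000). The standalone sketch below just replays that arithmetic for a few of the register defines; it is illustrative only and does not touch hardware:

	/* Standalone illustration of the GAMMA_OFF()/GAMMA_BASE() window split. */
	#include <stdio.h>

	struct window { int index; unsigned long offset; };

	static struct window map_reg(unsigned long reg)
	{
		struct window w;

		if (reg < 0x1000)       { w.index = 0; w.offset = reg; }
		else if (reg < 0x10000) { w.index = 1; w.offset = reg - 0x1000; }
		else if (reg < 0x11000) { w.index = 2; w.offset = reg - 0x10000; }
		else                    { w.index = 3; w.offset = reg - 0x11000; }
		return w;
	}

	int main(void)
	{
		/* A few of the registers defined above. */
		unsigned long regs[] = { 0x0018 /* INFIFOSPACE */,
					 0x8c00 /* FILTERMODE */,
					 0x0c48 /* COMMANDINTENABLE */ };
		for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
			struct window w = map_reg(regs[i]);
			printf("reg 0x%05lx -> mmio%d + 0x%05lx\n",
			       regs[i], w.index, w.offset);
		}
		return 0;
	}

For example, GAMMA_FILTERMODE at 0x8c00 lands in mmio1 at offset 0x7c00.
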
diff --git a/drivers/char/drm/gamma_lists.h b/drivers/char/drm/gamma_lists.h deleted file mode 100644 index 2d93f412b96b..000000000000 --- a/drivers/char/drm/gamma_lists.h +++ /dev/null | |||
@@ -1,215 +0,0 @@ | |||
1 | /* drm_lists.h -- Buffer list handling routines -*- linux-c -*- | ||
2 | * Created: Mon Apr 19 20:54:22 1999 by faith@valinux.com | ||
3 | * | ||
4 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. | ||
5 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | ||
6 | * All Rights Reserved. | ||
7 | * | ||
8 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
25 | * OTHER DEALINGS IN THE SOFTWARE. | ||
26 | * | ||
27 | * Authors: | ||
28 | * Rickard E. (Rik) Faith <faith@valinux.com> | ||
29 | * Gareth Hughes <gareth@valinux.com> | ||
30 | */ | ||
31 | |||
32 | #include "drmP.h" | ||
33 | |||
34 | |||
35 | int DRM(waitlist_create)(drm_waitlist_t *bl, int count) | ||
36 | { | ||
37 | if (bl->count) return -EINVAL; | ||
38 | |||
39 | bl->bufs = DRM(alloc)((count + 2) * sizeof(*bl->bufs), | ||
40 | DRM_MEM_BUFLISTS); | ||
41 | |||
42 | if (!bl->bufs) return -ENOMEM; | ||
43 | memset(bl->bufs, 0, (count + 2) * sizeof(*bl->bufs)); | ||
44 | bl->count = count; | ||
45 | bl->rp = bl->bufs; | ||
46 | bl->wp = bl->bufs; | ||
47 | bl->end = &bl->bufs[bl->count+1]; | ||
48 | spin_lock_init(&bl->write_lock); | ||
49 | spin_lock_init(&bl->read_lock); | ||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | int DRM(waitlist_destroy)(drm_waitlist_t *bl) | ||
54 | { | ||
55 | if (bl->rp != bl->wp) return -EINVAL; | ||
56 | if (bl->bufs) DRM(free)(bl->bufs, | ||
57 | (bl->count + 2) * sizeof(*bl->bufs), | ||
58 | DRM_MEM_BUFLISTS); | ||
59 | bl->count = 0; | ||
60 | bl->bufs = NULL; | ||
61 | bl->rp = NULL; | ||
62 | bl->wp = NULL; | ||
63 | bl->end = NULL; | ||
64 | return 0; | ||
65 | } | ||
66 | |||
67 | int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf) | ||
68 | { | ||
69 | int left; | ||
70 | unsigned long flags; | ||
71 | |||
72 | left = DRM_LEFTCOUNT(bl); | ||
73 | if (!left) { | ||
74 | DRM_ERROR("Overflow while adding buffer %d from filp %p\n", | ||
75 | buf->idx, buf->filp); | ||
76 | return -EINVAL; | ||
77 | } | ||
78 | buf->list = DRM_LIST_WAIT; | ||
79 | |||
80 | spin_lock_irqsave(&bl->write_lock, flags); | ||
81 | *bl->wp = buf; | ||
82 | if (++bl->wp >= bl->end) bl->wp = bl->bufs; | ||
83 | spin_unlock_irqrestore(&bl->write_lock, flags); | ||
84 | |||
85 | return 0; | ||
86 | } | ||
87 | |||
88 | drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl) | ||
89 | { | ||
90 | drm_buf_t *buf; | ||
91 | unsigned long flags; | ||
92 | |||
93 | spin_lock_irqsave(&bl->read_lock, flags); | ||
94 | buf = *bl->rp; | ||
95 | if (bl->rp == bl->wp) { | ||
96 | spin_unlock_irqrestore(&bl->read_lock, flags); | ||
97 | return NULL; | ||
98 | } | ||
99 | if (++bl->rp >= bl->end) bl->rp = bl->bufs; | ||
100 | spin_unlock_irqrestore(&bl->read_lock, flags); | ||
101 | |||
102 | return buf; | ||
103 | } | ||
104 | |||
105 | int DRM(freelist_create)(drm_freelist_t *bl, int count) | ||
106 | { | ||
107 | atomic_set(&bl->count, 0); | ||
108 | bl->next = NULL; | ||
109 | init_waitqueue_head(&bl->waiting); | ||
110 | bl->low_mark = 0; | ||
111 | bl->high_mark = 0; | ||
112 | atomic_set(&bl->wfh, 0); | ||
113 | spin_lock_init(&bl->lock); | ||
114 | ++bl->initialized; | ||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | int DRM(freelist_destroy)(drm_freelist_t *bl) | ||
119 | { | ||
120 | atomic_set(&bl->count, 0); | ||
121 | bl->next = NULL; | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl, drm_buf_t *buf) | ||
126 | { | ||
127 | drm_device_dma_t *dma = dev->dma; | ||
128 | |||
129 | if (!dma) { | ||
130 | DRM_ERROR("No DMA support\n"); | ||
131 | return 1; | ||
132 | } | ||
133 | |||
134 | if (buf->waiting || buf->pending || buf->list == DRM_LIST_FREE) { | ||
135 | DRM_ERROR("Freed buffer %d: w%d, p%d, l%d\n", | ||
136 | buf->idx, buf->waiting, buf->pending, buf->list); | ||
137 | } | ||
138 | if (!bl) return 1; | ||
139 | buf->list = DRM_LIST_FREE; | ||
140 | |||
141 | spin_lock(&bl->lock); | ||
142 | buf->next = bl->next; | ||
143 | bl->next = buf; | ||
144 | spin_unlock(&bl->lock); | ||
145 | |||
146 | atomic_inc(&bl->count); | ||
147 | if (atomic_read(&bl->count) > dma->buf_count) { | ||
148 | DRM_ERROR("%d of %d buffers free after addition of %d\n", | ||
149 | atomic_read(&bl->count), dma->buf_count, buf->idx); | ||
150 | return 1; | ||
151 | } | ||
152 | /* Check for high water mark */ | ||
153 | if (atomic_read(&bl->wfh) && atomic_read(&bl->count)>=bl->high_mark) { | ||
154 | atomic_set(&bl->wfh, 0); | ||
155 | wake_up_interruptible(&bl->waiting); | ||
156 | } | ||
157 | return 0; | ||
158 | } | ||
159 | |||
160 | static drm_buf_t *DRM(freelist_try)(drm_freelist_t *bl) | ||
161 | { | ||
162 | drm_buf_t *buf; | ||
163 | |||
164 | if (!bl) return NULL; | ||
165 | |||
166 | /* Get buffer */ | ||
167 | spin_lock(&bl->lock); | ||
168 | if (!bl->next) { | ||
169 | spin_unlock(&bl->lock); | ||
170 | return NULL; | ||
171 | } | ||
172 | buf = bl->next; | ||
173 | bl->next = bl->next->next; | ||
174 | spin_unlock(&bl->lock); | ||
175 | |||
176 | atomic_dec(&bl->count); | ||
177 | buf->next = NULL; | ||
178 | buf->list = DRM_LIST_NONE; | ||
179 | if (buf->waiting || buf->pending) { | ||
180 | DRM_ERROR("Free buffer %d: w%d, p%d, l%d\n", | ||
181 | buf->idx, buf->waiting, buf->pending, buf->list); | ||
182 | } | ||
183 | |||
184 | return buf; | ||
185 | } | ||
186 | |||
187 | drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block) | ||
188 | { | ||
189 | drm_buf_t *buf = NULL; | ||
190 | DECLARE_WAITQUEUE(entry, current); | ||
191 | |||
192 | if (!bl || !bl->initialized) return NULL; | ||
193 | |||
194 | /* Check for low water mark */ | ||
195 | if (atomic_read(&bl->count) <= bl->low_mark) /* Became low */ | ||
196 | atomic_set(&bl->wfh, 1); | ||
197 | if (atomic_read(&bl->wfh)) { | ||
198 | if (block) { | ||
199 | add_wait_queue(&bl->waiting, &entry); | ||
200 | for (;;) { | ||
201 | current->state = TASK_INTERRUPTIBLE; | ||
202 | if (!atomic_read(&bl->wfh) | ||
203 | && (buf = DRM(freelist_try)(bl))) break; | ||
204 | schedule(); | ||
205 | if (signal_pending(current)) break; | ||
206 | } | ||
207 | current->state = TASK_RUNNING; | ||
208 | remove_wait_queue(&bl->waiting, &entry); | ||
209 | } | ||
210 | return buf; | ||
211 | } | ||
212 | |||
213 | return DRM(freelist_try)(bl); | ||
214 | } | ||
215 | |||
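
Editor's note: the waitlist above is a fixed-size ring of buffer pointers: DRM(waitlist_put) advances wp, DRM(waitlist_get) advances rp, and rp == wp marks the ring empty (the array is allocated with spare slots so a full list still keeps rp and wp distinguishable). A minimal single-threaded sketch of the same ring discipline, with hypothetical names and without the spinlocks, is:

	/* Minimal ring-of-pointers sketch mirroring the waitlist put/get logic
	 * (single-threaded; the real code wraps these in spinlocks). */
	#include <stdio.h>

	#define RING_SLOTS 5  /* one slot is always left empty to mark "full" */

	struct ring {
		void *slot[RING_SLOTS];
		int rp;  /* read position */
		int wp;  /* write position */
	};

	static int ring_put(struct ring *r, void *p)
	{
		int next = (r->wp + 1) % RING_SLOTS;

		if (next == r->rp)
			return -1;          /* full: would overrun the reader */
		r->slot[r->wp] = p;
		r->wp = next;
		return 0;
	}

	static void *ring_get(struct ring *r)
	{
		void *p;

		if (r->rp == r->wp)
			return NULL;        /* empty */
		p = r->slot[r->rp];
		r->rp = (r->rp + 1) % RING_SLOTS;
		return p;
	}

	int main(void)
	{
		struct ring r = { { NULL }, 0, 0 };
		int bufs[4] = { 10, 11, 12, 13 };

		for (int i = 0; i < 4; i++)
			printf("put %d -> %s\n", bufs[i],
			       ring_put(&r, &bufs[i]) ? "full" : "ok");
		for (void *p; (p = ring_get(&r)) != NULL; )
			printf("got %d\n", *(int *)p);
		return 0;
	}

The freelist alongside it uses a different technique: a lock-protected singly-linked list of free buffers with low/high watermarks that decide when getters sleep and when a put wakes them.
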
diff --git a/drivers/char/drm/gamma_lock.h b/drivers/char/drm/gamma_lock.h deleted file mode 100644 index ddec67e4ed16..000000000000 --- a/drivers/char/drm/gamma_lock.h +++ /dev/null | |||
@@ -1,140 +0,0 @@ | |||
1 | /* lock.c -- IOCTLs for locking -*- linux-c -*- | ||
2 | * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com | ||
3 | * | ||
4 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. | ||
5 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | ||
6 | * All Rights Reserved. | ||
7 | * | ||
8 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
25 | * OTHER DEALINGS IN THE SOFTWARE. | ||
26 | * | ||
27 | * Authors: | ||
28 | * Rickard E. (Rik) Faith <faith@valinux.com> | ||
29 | * Gareth Hughes <gareth@valinux.com> | ||
30 | */ | ||
31 | |||
32 | |||
33 | /* Gamma-specific code extracted from drm_lock.h: | ||
34 | */ | ||
35 | static int DRM(flush_queue)(drm_device_t *dev, int context) | ||
36 | { | ||
37 | DECLARE_WAITQUEUE(entry, current); | ||
38 | int ret = 0; | ||
39 | drm_queue_t *q = dev->queuelist[context]; | ||
40 | |||
41 | DRM_DEBUG("\n"); | ||
42 | |||
43 | atomic_inc(&q->use_count); | ||
44 | if (atomic_read(&q->use_count) > 1) { | ||
45 | atomic_inc(&q->block_write); | ||
46 | add_wait_queue(&q->flush_queue, &entry); | ||
47 | atomic_inc(&q->block_count); | ||
48 | for (;;) { | ||
49 | current->state = TASK_INTERRUPTIBLE; | ||
50 | if (!DRM_BUFCOUNT(&q->waitlist)) break; | ||
51 | schedule(); | ||
52 | if (signal_pending(current)) { | ||
53 | ret = -EINTR; /* Can't restart */ | ||
54 | break; | ||
55 | } | ||
56 | } | ||
57 | atomic_dec(&q->block_count); | ||
58 | current->state = TASK_RUNNING; | ||
59 | remove_wait_queue(&q->flush_queue, &entry); | ||
60 | } | ||
61 | atomic_dec(&q->use_count); | ||
62 | |||
63 | /* NOTE: block_write is still incremented! | ||
64 | Use drm_flush_unlock_queue to decrement. */ | ||
65 | return ret; | ||
66 | } | ||
67 | |||
68 | static int DRM(flush_unblock_queue)(drm_device_t *dev, int context) | ||
69 | { | ||
70 | drm_queue_t *q = dev->queuelist[context]; | ||
71 | |||
72 | DRM_DEBUG("\n"); | ||
73 | |||
74 | atomic_inc(&q->use_count); | ||
75 | if (atomic_read(&q->use_count) > 1) { | ||
76 | if (atomic_read(&q->block_write)) { | ||
77 | atomic_dec(&q->block_write); | ||
78 | wake_up_interruptible(&q->write_queue); | ||
79 | } | ||
80 | } | ||
81 | atomic_dec(&q->use_count); | ||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | int DRM(flush_block_and_flush)(drm_device_t *dev, int context, | ||
86 | drm_lock_flags_t flags) | ||
87 | { | ||
88 | int ret = 0; | ||
89 | int i; | ||
90 | |||
91 | DRM_DEBUG("\n"); | ||
92 | |||
93 | if (flags & _DRM_LOCK_FLUSH) { | ||
94 | ret = DRM(flush_queue)(dev, DRM_KERNEL_CONTEXT); | ||
95 | if (!ret) ret = DRM(flush_queue)(dev, context); | ||
96 | } | ||
97 | if (flags & _DRM_LOCK_FLUSH_ALL) { | ||
98 | for (i = 0; !ret && i < dev->queue_count; i++) { | ||
99 | ret = DRM(flush_queue)(dev, i); | ||
100 | } | ||
101 | } | ||
102 | return ret; | ||
103 | } | ||
104 | |||
105 | int DRM(flush_unblock)(drm_device_t *dev, int context, drm_lock_flags_t flags) | ||
106 | { | ||
107 | int ret = 0; | ||
108 | int i; | ||
109 | |||
110 | DRM_DEBUG("\n"); | ||
111 | |||
112 | if (flags & _DRM_LOCK_FLUSH) { | ||
113 | ret = DRM(flush_unblock_queue)(dev, DRM_KERNEL_CONTEXT); | ||
114 | if (!ret) ret = DRM(flush_unblock_queue)(dev, context); | ||
115 | } | ||
116 | if (flags & _DRM_LOCK_FLUSH_ALL) { | ||
117 | for (i = 0; !ret && i < dev->queue_count; i++) { | ||
118 | ret = DRM(flush_unblock_queue)(dev, i); | ||
119 | } | ||
120 | } | ||
121 | |||
122 | return ret; | ||
123 | } | ||
124 | |||
125 | int DRM(finish)(struct inode *inode, struct file *filp, unsigned int cmd, | ||
126 | unsigned long arg) | ||
127 | { | ||
128 | drm_file_t *priv = filp->private_data; | ||
129 | drm_device_t *dev = priv->dev; | ||
130 | int ret = 0; | ||
131 | drm_lock_t lock; | ||
132 | |||
133 | DRM_DEBUG("\n"); | ||
134 | |||
135 | if (copy_from_user(&lock, (drm_lock_t __user *)arg, sizeof(lock))) | ||
136 | return -EFAULT; | ||
137 | ret = DRM(flush_block_and_flush)(dev, lock.context, lock.flags); | ||
138 | DRM(flush_unblock)(dev, lock.context, lock.flags); | ||
139 | return ret; | ||
140 | } | ||
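
Editor's note: gamma_lock.h above implements a block-drain-unblock sequence: DRM(flush_queue) raises block_write so new submissions stall, then sleeps until the waitlist drains, and DRM(flush_unblock_queue) later drops block_write and wakes the stalled writers; DRM(finish) chains the two. A small pthread sketch of that pattern follows (a userspace illustration only, using a mutex/condvar in place of the kernel wait queues and atomics):

	/* Userspace sketch of the block -> drain -> unblock pattern. */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
	static int queued;       /* buffers waiting, like DRM_BUFCOUNT() */
	static int block_write;  /* writers must stall while this is set */

	static void enqueue(void)            /* producer side, like dma_enqueue */
	{
		pthread_mutex_lock(&lock);
		while (block_write)
			pthread_cond_wait(&cond, &lock);
		queued++;
		pthread_mutex_unlock(&lock);
	}

	static void dispatch_all(void)       /* consumer drains the queue */
	{
		pthread_mutex_lock(&lock);
		queued = 0;
		pthread_cond_broadcast(&cond);   /* wake a flusher waiting for empty */
		pthread_mutex_unlock(&lock);
	}

	static void flush_block(void)        /* role of DRM(flush_queue) */
	{
		pthread_mutex_lock(&lock);
		block_write = 1;                 /* stall new writers ... */
		while (queued)                   /* ... and wait for the drain */
			pthread_cond_wait(&cond, &lock);
		pthread_mutex_unlock(&lock);
	}

	static void flush_unblock(void)      /* role of DRM(flush_unblock_queue) */
	{
		pthread_mutex_lock(&lock);
		block_write = 0;
		pthread_cond_broadcast(&cond);   /* let stalled writers continue */
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		enqueue();
		dispatch_all();
		flush_block();     /* returns immediately: queue already empty */
		flush_unblock();
		printf("queued=%d block_write=%d\n", queued, block_write);
		return 0;
	}
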
diff --git a/drivers/char/drm/gamma_old_dma.h b/drivers/char/drm/gamma_old_dma.h deleted file mode 100644 index abdd454aab9f..000000000000 --- a/drivers/char/drm/gamma_old_dma.h +++ /dev/null | |||
@@ -1,313 +0,0 @@ | |||
1 | /* drm_dma.c -- DMA IOCTL and function support -*- linux-c -*- | ||
2 | * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com | ||
3 | * | ||
4 | * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. | ||
5 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | ||
6 | * All Rights Reserved. | ||
7 | * | ||
8 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
25 | * OTHER DEALINGS IN THE SOFTWARE. | ||
26 | * | ||
27 | * Authors: | ||
28 | * Rickard E. (Rik) Faith <faith@valinux.com> | ||
29 | * Gareth Hughes <gareth@valinux.com> | ||
30 | */ | ||
31 | |||
32 | |||
33 | /* Gamma-specific code pulled from drm_dma.h: | ||
34 | */ | ||
35 | |||
36 | void DRM(clear_next_buffer)(drm_device_t *dev) | ||
37 | { | ||
38 | drm_device_dma_t *dma = dev->dma; | ||
39 | |||
40 | dma->next_buffer = NULL; | ||
41 | if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) { | ||
42 | wake_up_interruptible(&dma->next_queue->flush_queue); | ||
43 | } | ||
44 | dma->next_queue = NULL; | ||
45 | } | ||
46 | |||
47 | int DRM(select_queue)(drm_device_t *dev, void (*wrapper)(unsigned long)) | ||
48 | { | ||
49 | int i; | ||
50 | int candidate = -1; | ||
51 | int j = jiffies; | ||
52 | |||
53 | if (!dev) { | ||
54 | DRM_ERROR("No device\n"); | ||
55 | return -1; | ||
56 | } | ||
57 | if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) { | ||
58 | /* This only happens between the time the | ||
59 | interrupt is initialized and the time | ||
60 | the queues are initialized. */ | ||
61 | return -1; | ||
62 | } | ||
63 | |||
64 | /* Doing "while locked" DMA? */ | ||
65 | if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) { | ||
66 | return DRM_KERNEL_CONTEXT; | ||
67 | } | ||
68 | |||
69 | /* If there are buffers on the last_context | ||
70 | queue, and we have not been executing | ||
71 | this context very long, continue to | ||
72 | execute this context. */ | ||
73 | if (dev->last_switch <= j | ||
74 | && dev->last_switch + DRM_TIME_SLICE > j | ||
75 | && DRM_WAITCOUNT(dev, dev->last_context)) { | ||
76 | return dev->last_context; | ||
77 | } | ||
78 | |||
79 | /* Otherwise, find a candidate */ | ||
80 | for (i = dev->last_checked + 1; i < dev->queue_count; i++) { | ||
81 | if (DRM_WAITCOUNT(dev, i)) { | ||
82 | candidate = dev->last_checked = i; | ||
83 | break; | ||
84 | } | ||
85 | } | ||
86 | |||
87 | if (candidate < 0) { | ||
88 | for (i = 0; i < dev->queue_count; i++) { | ||
89 | if (DRM_WAITCOUNT(dev, i)) { | ||
90 | candidate = dev->last_checked = i; | ||
91 | break; | ||
92 | } | ||
93 | } | ||
94 | } | ||
95 | |||
96 | if (wrapper | ||
97 | && candidate >= 0 | ||
98 | && candidate != dev->last_context | ||
99 | && dev->last_switch <= j | ||
100 | && dev->last_switch + DRM_TIME_SLICE > j) { | ||
101 | if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) { | ||
102 | del_timer(&dev->timer); | ||
103 | dev->timer.function = wrapper; | ||
104 | dev->timer.data = (unsigned long)dev; | ||
105 | dev->timer.expires = dev->last_switch+DRM_TIME_SLICE; | ||
106 | add_timer(&dev->timer); | ||
107 | } | ||
108 | return -1; | ||
109 | } | ||
110 | |||
111 | return candidate; | ||
112 | } | ||
113 | |||
114 | |||
115 | int DRM(dma_enqueue)(struct file *filp, drm_dma_t *d) | ||
116 | { | ||
117 | drm_file_t *priv = filp->private_data; | ||
118 | drm_device_t *dev = priv->dev; | ||
119 | int i; | ||
120 | drm_queue_t *q; | ||
121 | drm_buf_t *buf; | ||
122 | int idx; | ||
123 | int while_locked = 0; | ||
124 | drm_device_dma_t *dma = dev->dma; | ||
125 | int *ind; | ||
126 | int err; | ||
127 | DECLARE_WAITQUEUE(entry, current); | ||
128 | |||
129 | DRM_DEBUG("%d\n", d->send_count); | ||
130 | |||
131 | if (d->flags & _DRM_DMA_WHILE_LOCKED) { | ||
132 | int context = dev->lock.hw_lock->lock; | ||
133 | |||
134 | if (!_DRM_LOCK_IS_HELD(context)) { | ||
135 | DRM_ERROR("No lock held during \"while locked\"" | ||
136 | " request\n"); | ||
137 | return -EINVAL; | ||
138 | } | ||
139 | if (d->context != _DRM_LOCKING_CONTEXT(context) | ||
140 | && _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) { | ||
141 | DRM_ERROR("Lock held by %d while %d makes" | ||
142 | " \"while locked\" request\n", | ||
143 | _DRM_LOCKING_CONTEXT(context), | ||
144 | d->context); | ||
145 | return -EINVAL; | ||
146 | } | ||
147 | q = dev->queuelist[DRM_KERNEL_CONTEXT]; | ||
148 | while_locked = 1; | ||
149 | } else { | ||
150 | q = dev->queuelist[d->context]; | ||
151 | } | ||
152 | |||
153 | |||
154 | atomic_inc(&q->use_count); | ||
155 | if (atomic_read(&q->block_write)) { | ||
156 | add_wait_queue(&q->write_queue, &entry); | ||
157 | atomic_inc(&q->block_count); | ||
158 | for (;;) { | ||
159 | current->state = TASK_INTERRUPTIBLE; | ||
160 | if (!atomic_read(&q->block_write)) break; | ||
161 | schedule(); | ||
162 | if (signal_pending(current)) { | ||
163 | atomic_dec(&q->use_count); | ||
164 | remove_wait_queue(&q->write_queue, &entry); | ||
165 | return -EINTR; | ||
166 | } | ||
167 | } | ||
168 | atomic_dec(&q->block_count); | ||
169 | current->state = TASK_RUNNING; | ||
170 | remove_wait_queue(&q->write_queue, &entry); | ||
171 | } | ||
172 | |||
173 | ind = DRM(alloc)(d->send_count * sizeof(int), DRM_MEM_DRIVER); | ||
174 | if (!ind) | ||
175 | return -ENOMEM; | ||
176 | |||
177 | if (copy_from_user(ind, d->send_indices, d->send_count * sizeof(int))) { | ||
178 | err = -EFAULT; | ||
179 | goto out; | ||
180 | } | ||
181 | |||
182 | err = -EINVAL; | ||
183 | for (i = 0; i < d->send_count; i++) { | ||
184 | idx = ind[i]; | ||
185 | if (idx < 0 || idx >= dma->buf_count) { | ||
186 | DRM_ERROR("Index %d (of %d max)\n", | ||
187 | ind[i], dma->buf_count - 1); | ||
188 | goto out; | ||
189 | } | ||
190 | buf = dma->buflist[ idx ]; | ||
191 | if (buf->filp != filp) { | ||
192 | DRM_ERROR("Process %d using buffer not owned\n", | ||
193 | current->pid); | ||
194 | goto out; | ||
195 | } | ||
196 | if (buf->list != DRM_LIST_NONE) { | ||
197 | DRM_ERROR("Process %d using buffer %d on list %d\n", | ||
198 | current->pid, buf->idx, buf->list); | ||
199 | goto out; | ||
200 | } | ||
201 | buf->used = ind[i]; | ||
202 | buf->while_locked = while_locked; | ||
203 | buf->context = d->context; | ||
204 | if (!buf->used) { | ||
205 | DRM_ERROR("Queueing 0 length buffer\n"); | ||
206 | } | ||
207 | if (buf->pending) { | ||
208 | DRM_ERROR("Queueing pending buffer:" | ||
209 | " buffer %d, offset %d\n", | ||
210 | ind[i], i); | ||
211 | goto out; | ||
212 | } | ||
213 | if (buf->waiting) { | ||
214 | DRM_ERROR("Queueing waiting buffer:" | ||
215 | " buffer %d, offset %d\n", | ||
216 | ind[i], i); | ||
217 | goto out; | ||
218 | } | ||
219 | buf->waiting = 1; | ||
220 | if (atomic_read(&q->use_count) == 1 | ||
221 | || atomic_read(&q->finalization)) { | ||
222 | DRM(free_buffer)(dev, buf); | ||
223 | } else { | ||
224 | DRM(waitlist_put)(&q->waitlist, buf); | ||
225 | atomic_inc(&q->total_queued); | ||
226 | } | ||
227 | } | ||
228 | atomic_dec(&q->use_count); | ||
229 | DRM(free)(ind, d->send_count * sizeof(int), DRM_MEM_DRIVER); | ||
230 | return 0; | ||
231 | |||
232 | out: | ||
233 | DRM(free)(ind, d->send_count * sizeof(int), DRM_MEM_DRIVER); | ||
234 | atomic_dec(&q->use_count); | ||
235 | return err; | ||
236 | } | ||
237 | |||
238 | static int DRM(dma_get_buffers_of_order)(struct file *filp, drm_dma_t *d, | ||
239 | int order) | ||
240 | { | ||
241 | drm_file_t *priv = filp->private_data; | ||
242 | drm_device_t *dev = priv->dev; | ||
243 | int i; | ||
244 | drm_buf_t *buf; | ||
245 | drm_device_dma_t *dma = dev->dma; | ||
246 | |||
247 | for (i = d->granted_count; i < d->request_count; i++) { | ||
248 | buf = DRM(freelist_get)(&dma->bufs[order].freelist, | ||
249 | d->flags & _DRM_DMA_WAIT); | ||
250 | if (!buf) break; | ||
251 | if (buf->pending || buf->waiting) { | ||
252 | DRM_ERROR("Free buffer %d in use: filp %p (w%d, p%d)\n", | ||
253 | buf->idx, | ||
254 | buf->filp, | ||
255 | buf->waiting, | ||
256 | buf->pending); | ||
257 | } | ||
258 | buf->filp = filp; | ||
259 | if (copy_to_user(&d->request_indices[i], | ||
260 | &buf->idx, | ||
261 | sizeof(buf->idx))) | ||
262 | return -EFAULT; | ||
263 | |||
264 | if (copy_to_user(&d->request_sizes[i], | ||
265 | &buf->total, | ||
266 | sizeof(buf->total))) | ||
267 | return -EFAULT; | ||
268 | |||
269 | ++d->granted_count; | ||
270 | } | ||
271 | return 0; | ||
272 | } | ||
273 | |||
274 | |||
275 | int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma) | ||
276 | { | ||
277 | int order; | ||
278 | int retcode = 0; | ||
279 | int tmp_order; | ||
280 | |||
281 | order = DRM(order)(dma->request_size); | ||
282 | |||
283 | dma->granted_count = 0; | ||
284 | retcode = DRM(dma_get_buffers_of_order)(filp, dma, order); | ||
285 | |||
286 | if (dma->granted_count < dma->request_count | ||
287 | && (dma->flags & _DRM_DMA_SMALLER_OK)) { | ||
288 | for (tmp_order = order - 1; | ||
289 | !retcode | ||
290 | && dma->granted_count < dma->request_count | ||
291 | && tmp_order >= DRM_MIN_ORDER; | ||
292 | --tmp_order) { | ||
293 | |||
294 | retcode = DRM(dma_get_buffers_of_order)(filp, dma, | ||
295 | tmp_order); | ||
296 | } | ||
297 | } | ||
298 | |||
299 | if (dma->granted_count < dma->request_count | ||
300 | && (dma->flags & _DRM_DMA_LARGER_OK)) { | ||
301 | for (tmp_order = order + 1; | ||
302 | !retcode | ||
303 | && dma->granted_count < dma->request_count | ||
304 | && tmp_order <= DRM_MAX_ORDER; | ||
305 | ++tmp_order) { | ||
306 | |||
307 | retcode = DRM(dma_get_buffers_of_order)(filp, dma, | ||
308 | tmp_order); | ||
309 | } | ||
310 | } | ||
311 | return 0; | ||
312 | } | ||
313 | |||
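
Editor's note: DRM(dma_get_buffers) above hands out DMA buffers by power-of-two size class ("order"): it first pulls from the freelist whose order matches the request size, and if the caller allowed it (_DRM_DMA_SMALLER_OK / _DRM_DMA_LARGER_OK) it then walks down through smaller orders and up through larger ones until the request is satisfied. A toy standalone model of that fallback walk, with made-up order bounds and free counts, is:

	/* Toy model of the order-fallback allocation in DRM(dma_get_buffers). */
	#include <stdio.h>

	#define MIN_ORDER 5   /* hypothetical bounds: 32-byte ... */
	#define MAX_ORDER 10  /* ... to 1024-byte buffers */

	static int free_per_order[MAX_ORDER + 1] = {
		[7] = 1, [8] = 3, [9] = 2,  /* only some size classes have buffers */
	};

	/* Smallest order whose size covers 'size' (the role of DRM(order)). */
	static int size_to_order(int size)
	{
		int order = MIN_ORDER;

		while (order < MAX_ORDER && (1 << order) < size)
			order++;
		return order;
	}

	/* Take up to 'want' buffers from one order's freelist. */
	static int grab(int order, int want)
	{
		int got = want < free_per_order[order] ? want : free_per_order[order];

		free_per_order[order] -= got;
		return got;
	}

	int main(void)
	{
		int want = 5, granted = 0;
		int order = size_to_order(300);         /* -> order 9 (512 bytes) */

		granted += grab(order, want - granted); /* exact order first */
		for (int o = order - 1; granted < want && o >= MIN_ORDER; o--)
			granted += grab(o, want - granted);  /* SMALLER_OK path */
		for (int o = order + 1; granted < want && o <= MAX_ORDER; o++)
			granted += grab(o, want - granted);  /* LARGER_OK path */
		printf("granted %d of %d buffers\n", granted, want);
		return 0;
	}
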