author    Dave Airlie <airlied@redhat.com>    2008-05-28 20:09:59 -0400
committer Dave Airlie <airlied@redhat.com>    2008-07-13 20:45:01 -0400
commit    c0e09200dc0813972442e550a5905a132768e56c (patch)
tree      d38e635a30ff8b0a2b98b9d7f97cab1501f8209e /drivers/gpu/drm/drm_lock.c
parent    bce7f793daec3e65ec5c5705d2457b81fe7b5725 (diff)
drm: reorganise drm tree to be more future proof.
With the coming of kernel-based modesetting and the memory manager work, the everything-in-one-directory approach was getting very ugly and starting to be unmanageable. This restructures the drm along the lines of other kernel components: it creates a drivers/gpu/drm directory and moves the hw drivers into subdirectories, moves the includes into include/drm, and sets up the unifdef for the userspace headers we should be exporting.

Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/drm_lock.c')
-rw-r--r--  drivers/gpu/drm/drm_lock.c | 391
1 file changed, 391 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
new file mode 100644
index 000000000000..0998723cde79
--- /dev/null
+++ b/drivers/gpu/drm/drm_lock.c
@@ -0,0 +1,391 @@
/**
 * \file drm_lock.c
 * IOCTLs for locking
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"

static int drm_notifier(void *priv);
/**
 * Lock ioctl.
 *
 * \param dev DRM device.
 * \param data user argument, pointing to a drm_lock structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Add the current task to the lock wait queue, and attempt to take the lock.
 */
int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        DECLARE_WAITQUEUE(entry, current);
        struct drm_lock *lock = data;
        int ret = 0;

        ++file_priv->lock_count;

        if (lock->context == DRM_KERNEL_CONTEXT) {
                DRM_ERROR("Process %d using kernel context %d\n",
                          task_pid_nr(current), lock->context);
                return -EINVAL;
        }

        DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
                  lock->context, task_pid_nr(current),
                  dev->lock.hw_lock->lock, lock->flags);

        if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
                if (lock->context < 0)
                        return -EINVAL;

        add_wait_queue(&dev->lock.lock_queue, &entry);
        spin_lock_bh(&dev->lock.spinlock);
        dev->lock.user_waiters++;
        spin_unlock_bh(&dev->lock.spinlock);
        for (;;) {
                __set_current_state(TASK_INTERRUPTIBLE);
                if (!dev->lock.hw_lock) {
                        /* Device has been unregistered */
                        ret = -EINTR;
                        break;
                }
                if (drm_lock_take(&dev->lock, lock->context)) {
                        dev->lock.file_priv = file_priv;
                        dev->lock.lock_time = jiffies;
                        atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
                        break;  /* Got lock */
                }

                /* Contention */
                schedule();
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
        }
        spin_lock_bh(&dev->lock.spinlock);
        dev->lock.user_waiters--;
        spin_unlock_bh(&dev->lock.spinlock);
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&dev->lock.lock_queue, &entry);

        DRM_DEBUG("%d %s\n", lock->context,
                  ret ? "interrupted" : "has lock");
        if (ret)
                return ret;

        sigemptyset(&dev->sigmask);
        sigaddset(&dev->sigmask, SIGSTOP);
        sigaddset(&dev->sigmask, SIGTSTP);
        sigaddset(&dev->sigmask, SIGTTIN);
        sigaddset(&dev->sigmask, SIGTTOU);
        dev->sigdata.context = lock->context;
        dev->sigdata.lock = dev->lock.hw_lock;
        block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);

        if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY))
                dev->driver->dma_ready(dev);

        if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT)) {
                if (dev->driver->dma_quiescent(dev)) {
                        DRM_DEBUG("%d waiting for DMA quiescent\n",
                                  lock->context);
                        return -EBUSY;
                }
        }

        if (dev->driver->kernel_context_switch &&
            dev->last_context != lock->context) {
                dev->driver->kernel_context_switch(dev, dev->last_context,
                                                   lock->context);
        }

        return 0;
}
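
/*
 * A hedged sketch of the userspace side: a client takes and drops this
 * lock through the DRM_IOCTL_LOCK/DRM_IOCTL_UNLOCK ioctls with the same
 * struct drm_lock handled above. The fd and ctx_handle below are assumed
 * to come from opening the device node and DRM_IOCTL_ADD_CTX; they are
 * not defined in this file.
 *
 *      struct drm_lock lock_arg = {
 *              .context = ctx_handle,          // never DRM_KERNEL_CONTEXT
 *              .flags   = _DRM_LOCK_READY,     // optionally wait for DMA ready
 *      };
 *      if (ioctl(fd, DRM_IOCTL_LOCK, &lock_arg) == 0) {
 *              // ... program the hardware ...
 *              ioctl(fd, DRM_IOCTL_UNLOCK, &lock_arg);
 *      }
 */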

/**
 * Unlock ioctl.
 *
 * \param dev DRM device.
 * \param data user argument, pointing to a drm_lock structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Transfer and free the lock.
 */
int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        struct drm_lock *lock = data;
        unsigned long irqflags;

        if (lock->context == DRM_KERNEL_CONTEXT) {
                DRM_ERROR("Process %d using kernel context %d\n",
                          task_pid_nr(current), lock->context);
                return -EINVAL;
        }

        spin_lock_irqsave(&dev->tasklet_lock, irqflags);

        if (dev->locked_tasklet_func) {
                dev->locked_tasklet_func(dev);

                dev->locked_tasklet_func = NULL;
        }

        spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);

        atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);

        /* kernel_context_switch isn't used by any of the x86 drm
         * modules but is required by the Sparc driver.
         */
        if (dev->driver->kernel_context_switch_unlock)
                dev->driver->kernel_context_switch_unlock(dev);
        else {
                if (drm_lock_free(&dev->lock, lock->context)) {
                        /* FIXME: Should really bail out here. */
                }
        }

        unblock_all_signals();
        return 0;
}
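
/*
 * The tasklet hand-off above is the release half of the "locked tasklet"
 * pattern: an interrupt path that needs the heavyweight lock can defer a
 * function with drm_locked_tasklet() (in drm_irq.c), and the next unlock
 * runs it. A minimal sketch, with my_deferred_flip standing in for a
 * driver-supplied callback (assumed, not part of this file):
 *
 *      static void my_deferred_flip(struct drm_device *dev)
 *      {
 *              // runs from drm_unlock() under dev->tasklet_lock
 *      }
 *      ...
 *      drm_locked_tasklet(dev, my_deferred_flip);  // from the IRQ handler
 */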

/**
 * Take the heavyweight lock.
 *
 * \param lock_data lock data pointer.
 * \param context locking context.
 * \return one if the lock is held, or zero otherwise.
 *
 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
 */
int drm_lock_take(struct drm_lock_data *lock_data,
                  unsigned int context)
{
        unsigned int old, new, prev;
        volatile unsigned int *lock = &lock_data->hw_lock->lock;

        spin_lock_bh(&lock_data->spinlock);
        do {
                old = *lock;
                if (old & _DRM_LOCK_HELD)
                        new = old | _DRM_LOCK_CONT;
                else {
                        new = context | _DRM_LOCK_HELD |
                                ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
                                 _DRM_LOCK_CONT : 0);
                }
                prev = cmpxchg(lock, old, new);
        } while (prev != old);
        spin_unlock_bh(&lock_data->spinlock);

        if (_DRM_LOCKING_CONTEXT(old) == context) {
                if (old & _DRM_LOCK_HELD) {
                        if (context != DRM_KERNEL_CONTEXT) {
                                DRM_ERROR("%d holds heavyweight lock\n",
                                          context);
                        }
                        return 0;
                }
        }

        if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
                /* Have lock */
                return 1;
        }
        return 0;
}
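
/*
 * For reference, the lock word that drm_lock_take() cmpxchg()es is laid
 * out by include/drm/drm.h: the low bits carry the context handle and
 * the two top bits are flags, i.e.
 *
 *      #define _DRM_LOCK_HELD 0x80000000U      // lock is held
 *      #define _DRM_LOCK_CONT 0x40000000U      // a waiter is contending
 *
 * so taking a free lock is a cmpxchg() to (context | _DRM_LOCK_HELD),
 * and a contended attempt just ORs in _DRM_LOCK_CONT so the holder knows
 * to wake the queue when it releases.
 */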

/**
 * This takes a lock forcibly and hands it to context. Should ONLY be used
 * inside *_unlock to give lock to kernel before calling *_dma_schedule.
 *
 * \param lock_data lock data pointer.
 * \param context locking context.
 * \return always one.
 *
 * Resets the lock file pointer.
 * Marks the lock as held by the given context, via the \p cmpxchg instruction.
 */
static int drm_lock_transfer(struct drm_lock_data *lock_data,
                             unsigned int context)
{
        unsigned int old, new, prev;
        volatile unsigned int *lock = &lock_data->hw_lock->lock;

        lock_data->file_priv = NULL;
        do {
                old = *lock;
                new = context | _DRM_LOCK_HELD;
                prev = cmpxchg(lock, old, new);
        } while (prev != old);
        return 1;
}

/**
 * Free lock.
 *
 * \param lock_data lock data pointer.
 * \param context locking context.
 *
 * Resets the lock file pointer.
 * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
 * waiting on the lock queue.
 */
int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
{
        unsigned int old, new, prev;
        volatile unsigned int *lock = &lock_data->hw_lock->lock;

        spin_lock_bh(&lock_data->spinlock);
        if (lock_data->kernel_waiters != 0) {
                drm_lock_transfer(lock_data, 0);
                lock_data->idle_has_lock = 1;
                spin_unlock_bh(&lock_data->spinlock);
                return 1;
        }
        spin_unlock_bh(&lock_data->spinlock);

        do {
                old = *lock;
                new = _DRM_LOCKING_CONTEXT(old);
                prev = cmpxchg(lock, old, new);
        } while (prev != old);

        if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
                DRM_ERROR("%d freed heavyweight lock held by %d\n",
                          context, _DRM_LOCKING_CONTEXT(old));
                return 1;
        }
        wake_up_interruptible(&lock_data->lock_queue);
        return 0;
}

/**
 * If we get here, it means that the process has called DRM_IOCTL_LOCK
 * without calling DRM_IOCTL_UNLOCK.
 *
 * If the lock is not held, then let the signal proceed as usual. If the lock
 * is held, then set the contended flag and keep the signal blocked.
 *
 * \param priv pointer to a drm_sigdata structure.
 * \return one if the signal should be delivered normally, or zero if the
 * signal should be blocked.
 */
static int drm_notifier(void *priv)
{
        struct drm_sigdata *s = (struct drm_sigdata *) priv;
        unsigned int old, new, prev;

        /* Allow signal delivery if lock isn't held */
        if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock)
            || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context)
                return 1;

        /* Otherwise, set flag to force call to drmUnlock */
        do {
                old = s->lock->lock;
                new = old | _DRM_LOCK_CONT;
                prev = cmpxchg(&s->lock->lock, old, new);
        } while (prev != old);
        return 0;
}
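
/*
 * drm_notifier() is installed by drm_lock() via block_all_signals() and is
 * consulted before each signal is delivered to the lock holder. Over one
 * lock/unlock cycle the flow is roughly (schematic, not literal code):
 *
 *      drm_lock():   block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
 *      SIGTSTP:      drm_notifier() returns 0 -> signal stays blocked and
 *                    _DRM_LOCK_CONT is set, nudging the holder to unlock
 *      drm_unlock(): unblock_all_signals();  // pending stop signals land here
 */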

/**
 * This function returns immediately. It takes the hw lock with the kernel
 * context if the lock is free; otherwise the kernel context is given the
 * highest priority for the lock when and if it is eventually released.
 *
 * This guarantees that the kernel will _eventually_ hold the lock _unless_
 * it is currently held by a blocked process. (In the latter case an explicit
 * wait for the hardware lock would cause a deadlock, which is why the
 * "idlelock" was invented.)
 *
 * This should be sufficient to wait for GPU idle without
 * having to worry about starvation.
 */
void drm_idlelock_take(struct drm_lock_data *lock_data)
{
        int ret = 0;

        spin_lock_bh(&lock_data->spinlock);
        lock_data->kernel_waiters++;
        if (!lock_data->idle_has_lock) {

                spin_unlock_bh(&lock_data->spinlock);
                ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
                spin_lock_bh(&lock_data->spinlock);

                if (ret == 1)
                        lock_data->idle_has_lock = 1;
        }
        spin_unlock_bh(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_idlelock_take);

void drm_idlelock_release(struct drm_lock_data *lock_data)
{
        unsigned int old, prev;
        volatile unsigned int *lock = &lock_data->hw_lock->lock;

        spin_lock_bh(&lock_data->spinlock);
        if (--lock_data->kernel_waiters == 0) {
                if (lock_data->idle_has_lock) {
                        do {
                                old = *lock;
                                prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
                        } while (prev != old);
                        wake_up_interruptible(&lock_data->lock_queue);
                        lock_data->idle_has_lock = 0;
                }
        }
        spin_unlock_bh(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_idlelock_release);
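
/*
 * A hedged usage sketch for the idlelock pair above, as a driver might
 * bracket a wait for engine idle; my_engine_quiesce() is an assumed
 * placeholder, not a DRM entry point:
 *
 *      drm_idlelock_take(&dev->lock);   // lock now, or at the next release
 *      my_engine_quiesce(dev);          // no client can re-take the HW lock
 *      drm_idlelock_release(&dev->lock);
 */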

int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
{
        return (file_priv->lock_count && dev->lock.hw_lock &&
                _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
                dev->lock.file_priv == file_priv);
}
EXPORT_SYMBOL(drm_i_have_hw_lock);