Diffstat (limited to 'drivers/char/drm')

 drivers/char/drm/drmP.h     |  21
 drivers/char/drm/drm_fops.c |  90
 drivers/char/drm/drm_irq.c  |   4
 drivers/char/drm/drm_lock.c | 134
 drivers/char/drm/drm_stub.c |   1
 drivers/char/drm/sis_drv.c  |   2
 drivers/char/drm/via_drv.c  |   3
 7 files changed, 174 insertions(+), 81 deletions(-)
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index 09705da8cdd7..80041d5b792d 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -414,6 +414,10 @@ typedef struct drm_lock_data {
 	struct file *filp;		/**< File descr of lock holder (0=kernel) */
 	wait_queue_head_t lock_queue;	/**< Queue of blocked processes */
 	unsigned long lock_time;	/**< Time of last lock in jiffies */
+	spinlock_t spinlock;
+	uint32_t kernel_waiters;
+	uint32_t user_waiters;
+	int idle_has_lock;
 } drm_lock_data_t;
 
 /**
@@ -590,6 +594,8 @@ struct drm_driver {
 	void (*reclaim_buffers) (struct drm_device * dev, struct file * filp);
 	void (*reclaim_buffers_locked) (struct drm_device *dev,
 					struct file *filp);
+	void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
+					    struct file * filp);
 	unsigned long (*get_map_ofs) (drm_map_t * map);
 	unsigned long (*get_reg_ofs) (struct drm_device * dev);
 	void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
@@ -915,9 +921,18 @@ extern int drm_lock(struct inode *inode, struct file *filp,
 		    unsigned int cmd, unsigned long arg);
 extern int drm_unlock(struct inode *inode, struct file *filp,
 		      unsigned int cmd, unsigned long arg);
-extern int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context);
-extern int drm_lock_free(drm_device_t * dev,
-			 __volatile__ unsigned int *lock, unsigned int context);
+extern int drm_lock_take(drm_lock_data_t *lock_data, unsigned int context);
+extern int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context);
+extern void drm_idlelock_take(drm_lock_data_t *lock_data);
+extern void drm_idlelock_release(drm_lock_data_t *lock_data);
+
+/*
+ * These are exported to drivers so that they can implement fencing using
+ * DMA quiscent + idle. DMA quiescent usually requires the hardware lock.
+ */
+
+extern int drm_i_have_hw_lock(struct file *filp);
+extern int drm_kernel_take_hw_lock(struct file *filp);
 
 /* Buffer management support (drm_bufs.h) */
 extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request);
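The comment in the hunk above says these helpers are exported so drivers can implement fencing via DMA-quiescent + idle. As a rough sketch of the intended call pattern (hypothetical driver code, not part of this patch; the helper name is made up), a driver entry point can use drm_i_have_hw_lock() to check that its caller really owns the heavyweight lock before touching engine state, much like drm_release() does in the drm_fops.c hunk below:

	/* Hypothetical driver code, for illustration only. */
	static void my_driver_flush(drm_device_t *dev, struct file *filp)
	{
		if (!drm_i_have_hw_lock(filp))
			return;		/* this client does not own the heavyweight lock */

		/* ... safe to idle/flush the engine on behalf of this client ... */
	}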
diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c
index 314abd9d6510..3b159cab3bc8 100644
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/char/drm/drm_fops.c
@@ -356,58 +356,56 @@ int drm_release(struct inode *inode, struct file *filp)
 		  current->pid, (long)old_encode_dev(priv->head->device),
 		  dev->open_count);
 
-	if (priv->lock_count && dev->lock.hw_lock &&
-	    _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
-	    dev->lock.filp == filp) {
-		DRM_DEBUG("File %p released, freeing lock for context %d\n",
-			  filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
-
-		if (dev->driver->reclaim_buffers_locked)
+	if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
+		if (drm_i_have_hw_lock(filp)) {
 			dev->driver->reclaim_buffers_locked(dev, filp);
-
-		drm_lock_free(dev, &dev->lock.hw_lock->lock,
-			      _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
-
-		/* FIXME: may require heavy-handed reset of
-		   hardware at this point, possibly
-		   processed via a callback to the X
-		   server. */
-	} else if (dev->driver->reclaim_buffers_locked && priv->lock_count
-		   && dev->lock.hw_lock) {
-		/* The lock is required to reclaim buffers */
-		DECLARE_WAITQUEUE(entry, current);
-
-		add_wait_queue(&dev->lock.lock_queue, &entry);
-		for (;;) {
-			__set_current_state(TASK_INTERRUPTIBLE);
-			if (!dev->lock.hw_lock) {
-				/* Device has been unregistered */
-				retcode = -EINTR;
-				break;
+		} else {
+			unsigned long _end=jiffies + 3*DRM_HZ;
+			int locked = 0;
+
+			drm_idlelock_take(&dev->lock);
+
+			/*
+			 * Wait for a while.
+			 */
+
+			do{
+				spin_lock(&dev->lock.spinlock);
+				locked = dev->lock.idle_has_lock;
+				spin_unlock(&dev->lock.spinlock);
+				if (locked)
+					break;
+				schedule();
+			} while (!time_after_eq(jiffies, _end));
+
+			if (!locked) {
+				DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
+					  "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
+					  "\tI will go on reclaiming the buffers anyway.\n");
 			}
-			if (drm_lock_take(&dev->lock.hw_lock->lock,
-					  DRM_KERNEL_CONTEXT)) {
-				dev->lock.filp = filp;
-				dev->lock.lock_time = jiffies;
-				atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
-				break;	/* Got lock */
-			}
-			/* Contention */
-			schedule();
-			if (signal_pending(current)) {
-				retcode = -ERESTARTSYS;
-				break;
-			}
-		}
-		__set_current_state(TASK_RUNNING);
-		remove_wait_queue(&dev->lock.lock_queue, &entry);
-		if (!retcode) {
+
 			dev->driver->reclaim_buffers_locked(dev, filp);
-			drm_lock_free(dev, &dev->lock.hw_lock->lock,
-				      DRM_KERNEL_CONTEXT);
+			drm_idlelock_release(&dev->lock);
 		}
 	}
 
+	if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) {
+
+		drm_idlelock_take(&dev->lock);
+		dev->driver->reclaim_buffers_idlelocked(dev, filp);
+		drm_idlelock_release(&dev->lock);
+
+	}
+
+	if (drm_i_have_hw_lock(filp)) {
+		DRM_DEBUG("File %p released, freeing lock for context %d\n",
+			  filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+
+		drm_lock_free(&dev->lock,
+			      _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+	}
+
+
 	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
 	    !dev->driver->reclaim_buffers_locked) {
 		dev->driver->reclaim_buffers(dev, filp);
diff --git a/drivers/char/drm/drm_irq.c b/drivers/char/drm/drm_irq.c
index 9d00c51fe2c4..2e75331fd83e 100644
--- a/drivers/char/drm/drm_irq.c
+++ b/drivers/char/drm/drm_irq.c
@@ -424,7 +424,7 @@ static void drm_locked_tasklet_func(unsigned long data)
 	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
 
 	if (!dev->locked_tasklet_func ||
-	    !drm_lock_take(&dev->lock.hw_lock->lock,
+	    !drm_lock_take(&dev->lock,
 			   DRM_KERNEL_CONTEXT)) {
 		spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
 		return;
@@ -435,7 +435,7 @@ static void drm_locked_tasklet_func(unsigned long data)
 
 	dev->locked_tasklet_func(dev);
 
-	drm_lock_free(dev, &dev->lock.hw_lock->lock,
+	drm_lock_free(&dev->lock,
 		      DRM_KERNEL_CONTEXT);
 
 	dev->locked_tasklet_func = NULL;
diff --git a/drivers/char/drm/drm_lock.c b/drivers/char/drm/drm_lock.c
index e9993ba461a2..befd1af19dfe 100644
--- a/drivers/char/drm/drm_lock.c
+++ b/drivers/char/drm/drm_lock.c
@@ -35,9 +35,6 @@
 
 #include "drmP.h"
 
-static int drm_lock_transfer(drm_device_t * dev,
-			     __volatile__ unsigned int *lock,
-			     unsigned int context);
 static int drm_notifier(void *priv);
 
 /**
@@ -80,6 +77,9 @@ int drm_lock(struct inode *inode, struct file *filp,
 		return -EINVAL;
 
 	add_wait_queue(&dev->lock.lock_queue, &entry);
+	spin_lock(&dev->lock.spinlock);
+	dev->lock.user_waiters++;
+	spin_unlock(&dev->lock.spinlock);
 	for (;;) {
 		__set_current_state(TASK_INTERRUPTIBLE);
 		if (!dev->lock.hw_lock) {
@@ -87,7 +87,7 @@ int drm_lock(struct inode *inode, struct file *filp,
 			ret = -EINTR;
 			break;
 		}
-		if (drm_lock_take(&dev->lock.hw_lock->lock, lock.context)) {
+		if (drm_lock_take(&dev->lock, lock.context)) {
 			dev->lock.filp = filp;
 			dev->lock.lock_time = jiffies;
 			atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
@@ -101,12 +101,14 @@ int drm_lock(struct inode *inode, struct file *filp,
 			break;
 		}
 	}
+	spin_lock(&dev->lock.spinlock);
+	dev->lock.user_waiters--;
+	spin_unlock(&dev->lock.spinlock);
 	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&dev->lock.lock_queue, &entry);
 
-	DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
-	if (ret)
-		return ret;
+	DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
+	if (ret) return ret;
 
 	sigemptyset(&dev->sigmask);
 	sigaddset(&dev->sigmask, SIGSTOP);
@@ -127,14 +129,12 @@ int drm_lock(struct inode *inode, struct file *filp,
 		}
 	}
 
-	/* dev->driver->kernel_context_switch isn't used by any of the x86
-	 * drivers but is used by the Sparc driver.
-	 */
 	if (dev->driver->kernel_context_switch &&
 	    dev->last_context != lock.context) {
 		dev->driver->kernel_context_switch(dev, dev->last_context,
 						   lock.context);
 	}
+
 	return 0;
 }
 
@@ -184,12 +184,8 @@ int drm_unlock(struct inode *inode, struct file *filp,
 	if (dev->driver->kernel_context_switch_unlock)
 		dev->driver->kernel_context_switch_unlock(dev);
 	else {
-		drm_lock_transfer(dev, &dev->lock.hw_lock->lock,
-				  DRM_KERNEL_CONTEXT);
-
-		if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
-				  DRM_KERNEL_CONTEXT)) {
-			DRM_ERROR("\n");
+		if (drm_lock_free(&dev->lock,lock.context)) {
+			/* FIXME: Should really bail out here. */
 		}
 	}
 
@@ -206,18 +202,26 @@ int drm_unlock(struct inode *inode, struct file *filp,
  *
  * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
  */
-int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
+int drm_lock_take(drm_lock_data_t *lock_data,
+		  unsigned int context)
 {
 	unsigned int old, new, prev;
+	volatile unsigned int *lock = &lock_data->hw_lock->lock;
 
+	spin_lock(&lock_data->spinlock);
 	do {
 		old = *lock;
 		if (old & _DRM_LOCK_HELD)
 			new = old | _DRM_LOCK_CONT;
-		else
-			new = context | _DRM_LOCK_HELD;
+		else {
+			new = context | _DRM_LOCK_HELD |
+				((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
+				 _DRM_LOCK_CONT : 0);
+		}
 		prev = cmpxchg(lock, old, new);
 	} while (prev != old);
+	spin_unlock(&lock_data->spinlock);
+
 	if (_DRM_LOCKING_CONTEXT(old) == context) {
 		if (old & _DRM_LOCK_HELD) {
 			if (context != DRM_KERNEL_CONTEXT) {
@@ -227,7 +231,8 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
 			return 0;
 		}
 	}
-	if (new == (context | _DRM_LOCK_HELD)) {
+
+	if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
 		/* Have lock */
 		return 1;
 	}
@@ -246,13 +251,13 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
 * Resets the lock file pointer.
 * Marks the lock as held by the given context, via the \p cmpxchg instruction.
 */
-static int drm_lock_transfer(drm_device_t * dev,
-			     __volatile__ unsigned int *lock,
+static int drm_lock_transfer(drm_lock_data_t *lock_data,
 			     unsigned int context)
 {
 	unsigned int old, new, prev;
+	volatile unsigned int *lock = &lock_data->hw_lock->lock;
 
-	dev->lock.filp = NULL;
+	lock_data->filp = NULL;
 	do {
 		old = *lock;
 		new = context | _DRM_LOCK_HELD;
@@ -272,23 +277,32 @@ static int drm_lock_transfer(drm_device_t * dev,
 * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
 * waiting on the lock queue.
 */
-int drm_lock_free(drm_device_t * dev,
-		  __volatile__ unsigned int *lock, unsigned int context)
+int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context)
 {
 	unsigned int old, new, prev;
+	volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+	spin_lock(&lock_data->spinlock);
+	if (lock_data->kernel_waiters != 0) {
+		drm_lock_transfer(lock_data, 0);
+		lock_data->idle_has_lock = 1;
+		spin_unlock(&lock_data->spinlock);
+		return 1;
+	}
+	spin_unlock(&lock_data->spinlock);
 
-	dev->lock.filp = NULL;
 	do {
 		old = *lock;
-		new = 0;
+		new = _DRM_LOCKING_CONTEXT(old);
 		prev = cmpxchg(lock, old, new);
 	} while (prev != old);
+
 	if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
 		DRM_ERROR("%d freed heavyweight lock held by %d\n",
 			  context, _DRM_LOCKING_CONTEXT(old));
 		return 1;
 	}
-	wake_up_interruptible(&dev->lock.lock_queue);
+	wake_up_interruptible(&lock_data->lock_queue);
 	return 0;
 }
 
@@ -322,3 +336,67 @@ static int drm_notifier(void *priv)
 	} while (prev != old);
 	return 0;
 }
+
+/**
+ * This function returns immediately and takes the hw lock
+ * with the kernel context if it is free, otherwise it gets the highest priority when and if
+ * it is eventually released.
+ *
+ * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
+ * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
+ * a deadlock, which is why the "idlelock" was invented).
+ *
+ * This should be sufficient to wait for GPU idle without
+ * having to worry about starvation.
+ */
+
+void drm_idlelock_take(drm_lock_data_t *lock_data)
+{
+	int ret = 0;
+
+	spin_lock(&lock_data->spinlock);
+	lock_data->kernel_waiters++;
+	if (!lock_data->idle_has_lock) {
+
+		spin_unlock(&lock_data->spinlock);
+		ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
+		spin_lock(&lock_data->spinlock);
+
+		if (ret == 1)
+			lock_data->idle_has_lock = 1;
+	}
+	spin_unlock(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_take);
+
+void drm_idlelock_release(drm_lock_data_t *lock_data)
+{
+	unsigned int old, prev;
+	volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+	spin_lock(&lock_data->spinlock);
+	if (--lock_data->kernel_waiters == 0) {
+		if (lock_data->idle_has_lock) {
+			do {
+				old = *lock;
+				prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
+			} while (prev != old);
+			wake_up_interruptible(&lock_data->lock_queue);
+			lock_data->idle_has_lock = 0;
+		}
+	}
+	spin_unlock(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_release);
+
+
+int drm_i_have_hw_lock(struct file *filp)
+{
+	DRM_DEVICE;
+
+	return (priv->lock_count && dev->lock.hw_lock &&
+		_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
+		dev->lock.filp == filp);
+}
+
+EXPORT_SYMBOL(drm_i_have_hw_lock);
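For reference, the way this patch itself consumes the new idlelock in drm_release() (drm_fops.c hunk above) boils down to the pattern below. This is a condensed sketch with the 3*DRM_HZ timeout and the forced-reclaim fallback stripped out; the function name is made up:

	/* Condensed sketch of the drm_release() usage above; illustration only. */
	static void example_idle_and_reclaim(drm_device_t *dev, struct file *filp)
	{
		int locked;

		drm_idlelock_take(&dev->lock);	/* register as kernel waiter; takes the lock if free */

		/* drm_lock_free() hands the lock to the kernel and sets idle_has_lock
		 * once the current holder drops it; drm_release() polls this flag with
		 * a timeout before reclaiming anyway.
		 */
		spin_lock(&dev->lock.spinlock);
		locked = dev->lock.idle_has_lock;
		spin_unlock(&dev->lock.spinlock);

		if (locked)
			dev->driver->reclaim_buffers_locked(dev, filp);

		drm_idlelock_release(&dev->lock);	/* drop the kernel claim, wake lock waiters */
	}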
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index 120d10256feb..19408adcc775 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -62,6 +62,7 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
 	spin_lock_init(&dev->count_lock);
 	spin_lock_init(&dev->drw_lock);
 	spin_lock_init(&dev->tasklet_lock);
+	spin_lock_init(&dev->lock.spinlock);
 	init_timer(&dev->timer);
 	mutex_init(&dev->struct_mutex);
 	mutex_init(&dev->ctxlist_mutex);
diff --git a/drivers/char/drm/sis_drv.c b/drivers/char/drm/sis_drv.c
index 3d5b3218b6ff..690e0af8e7c2 100644
--- a/drivers/char/drm/sis_drv.c
+++ b/drivers/char/drm/sis_drv.c
@@ -71,7 +71,7 @@ static struct drm_driver driver = {
 	.context_dtor = NULL,
 	.dma_quiescent = sis_idle,
 	.reclaim_buffers = NULL,
-	.reclaim_buffers_locked = sis_reclaim_buffers_locked,
+	.reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
 	.lastclose = sis_lastclose,
 	.get_map_ofs = drm_core_get_map_ofs,
 	.get_reg_ofs = drm_core_get_reg_ofs,
diff --git a/drivers/char/drm/via_drv.c b/drivers/char/drm/via_drv.c
index bb9dde8b1911..2d4957ab256a 100644
--- a/drivers/char/drm/via_drv.c
+++ b/drivers/char/drm/via_drv.c
@@ -52,7 +52,8 @@ static struct drm_driver driver = {
 	.dma_quiescent = via_driver_dma_quiescent,
 	.dri_library_name = dri_library_name,
 	.reclaim_buffers = drm_core_reclaim_buffers,
-	.reclaim_buffers_locked = via_reclaim_buffers_locked,
+	.reclaim_buffers_locked = NULL,
+	.reclaim_buffers_idlelocked = via_reclaim_buffers_locked,
 	.lastclose = via_lastclose,
 	.get_map_ofs = drm_core_get_map_ofs,
 	.get_reg_ofs = drm_core_get_reg_ofs,
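The sis and via conversions above simply repoint their existing reclaim functions at the new hook. For a driver written directly against the new interface, the shape would presumably look like the sketch below (hypothetical names, not from this patch). The hook runs from drm_release() with only the idlelock held, so the heavyweight lock cannot pass to a new client while it runs, but the callback must not assume it owns that lock itself:

	/* Hypothetical driver code, for illustration only. */
	static void my_reclaim_buffers_idlelocked(struct drm_device *dev,
						  struct file *filp)
	{
		/* ... free buffers owned by filp, return memory to the manager ... */
	}

	static struct drm_driver driver = {
		/* ... other hooks ... */
		.reclaim_buffers = drm_core_reclaim_buffers,
		.reclaim_buffers_locked = NULL,
		.reclaim_buffers_idlelocked = my_reclaim_buffers_idlelocked,
		/* ... */
	};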