author     Chris Wilson <chris@chris-wilson.co.uk>     2015-11-25 09:39:02 -0500
committer  Daniel Vetter <daniel.vetter@ffwll.ch>      2015-11-26 09:21:06 -0500
commit     83eb64c85b80959549c114365016276f318afeb2 (patch)
tree       61115345021697f6b6dad4121cf0171b07b3f833
parent     87069f4493b2101a71a92b7b9565f488a605a88f (diff)
drm: Drop dev->event_lock spinlock around faulting copy_to_user()
In

    commit cdd1cf799bd24ac0a4184549601ae302267301c5
    Author: Chris Wilson <chris@chris-wilson.co.uk>
    Date:   Thu Dec 4 21:03:25 2014 +0000

        drm: Make drm_read() more robust against multithreaded races

I fixed the races by serialising use of the events, extending the scope
of dev->event_lock. However, as Thomas pointed out, the copy_to_user()
may fault (even the __copy_to_user_inatomic() variant used here), and
calling into the driver backend with the spinlock held is bad news. We
therefore have to drop the spinlock before the copy. That, however,
re-exposes us to the old race whereby a second reader can see an
out-of-order event: the first reader may claim the first request but
fail to copy it to userspace, and by the time it returns the event to
the list it will sit behind the event currently being copied by the
second reader.

Reported-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Takashi Iwai <tiwai@suse.de>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1448462343-2072-1-git-send-email-chris@chris-wilson.co.uk
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
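For background (not part of this patch): userspace drains these events by read()ing the DRM file descriptor, and each record begins with a struct drm_event header whose length field covers header plus payload, which is why the kernel must hand records back whole and in order. A minimal, illustrative consumer is sketched below; the include path and the already-open drm_fd are assumptions.

/* Illustrative sketch only -- not part of this patch.
 * Drain pending events from an already-open DRM fd. */
#include <stdio.h>
#include <unistd.h>
#include <drm/drm.h>    /* struct drm_event, DRM_EVENT_* (install path may vary) */

static void drain_drm_events(int drm_fd)
{
        char buf[4096];
        ssize_t len = read(drm_fd, buf, sizeof(buf));
        ssize_t off = 0;

        while (len > 0 && off + (ssize_t)sizeof(struct drm_event) <= len) {
                struct drm_event *ev = (struct drm_event *)&buf[off];

                if (ev->type == DRM_EVENT_FLIP_COMPLETE)
                        printf("page flip completed\n");

                off += ev->length;      /* length spans header + payload */
        }
}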
 drivers/gpu/drm/drm_fops.c | 40
 1 file changed, 24 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index c59ce4d0ef75..eb8702d39e7d 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -488,9 +488,19 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
         if (!access_ok(VERIFY_WRITE, buffer, count))
                 return -EFAULT;
 
-        spin_lock_irq(&dev->event_lock);
         for (;;) {
-                if (list_empty(&file_priv->event_list)) {
+                struct drm_pending_event *e = NULL;
+
+                spin_lock_irq(&dev->event_lock);
+                if (!list_empty(&file_priv->event_list)) {
+                        e = list_first_entry(&file_priv->event_list,
+                                        struct drm_pending_event, link);
+                        file_priv->event_space += e->event->length;
+                        list_del(&e->link);
+                }
+                spin_unlock_irq(&dev->event_lock);
+
+                if (e == NULL) {
                         if (ret)
                                 break;
 
@@ -499,36 +509,34 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
                                 break;
                         }
 
-                        spin_unlock_irq(&dev->event_lock);
                         ret = wait_event_interruptible(file_priv->event_wait,
                                         !list_empty(&file_priv->event_list));
-                        spin_lock_irq(&dev->event_lock);
                         if (ret < 0)
                                 break;
 
                         ret = 0;
                 } else {
-                        struct drm_pending_event *e;
+                        unsigned length = e->event->length;
 
-                        e = list_first_entry(&file_priv->event_list,
-                                        struct drm_pending_event, link);
-                        if (e->event->length + ret > count)
+                        if (length > count - ret) {
+put_back_event:
+                                spin_lock_irq(&dev->event_lock);
+                                file_priv->event_space -= length;
+                                list_add(&e->link, &file_priv->event_list);
+                                spin_unlock_irq(&dev->event_lock);
                                 break;
+                        }
 
-                        if (__copy_to_user_inatomic(buffer + ret,
-                                        e->event, e->event->length)) {
+                        if (copy_to_user(buffer + ret, e->event, length)) {
                                 if (ret == 0)
                                         ret = -EFAULT;
-                                break;
+                                goto put_back_event;
                         }
 
-                        file_priv->event_space += e->event->length;
-                        ret += e->event->length;
-                        list_del(&e->link);
+                        ret += length;
                         e->destroy(e);
                 }
         }
-        spin_unlock_irq(&dev->event_lock);
 
         return ret;
 }
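The pattern the patch lands on is: claim the event under dev->event_lock, perform the possibly-faulting copy with the lock dropped, and re-queue the event at the head of the list if the copy cannot complete. A rough userspace analogue is sketched below; the names are hypothetical and a pthread mutex stands in for the spinlock.

/* Hypothetical sketch of the claim / unlocked-copy / put-back pattern. */
#include <pthread.h>
#include <string.h>
#include <stddef.h>

struct msg {
        struct msg *next;
        size_t len;
        char payload[64];
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct msg *queue_head;

static size_t read_one(char *dst, size_t space)
{
        struct msg *m;

        /* Claim the first message while holding the lock. */
        pthread_mutex_lock(&queue_lock);
        m = queue_head;
        if (m)
                queue_head = m->next;
        pthread_mutex_unlock(&queue_lock);

        if (!m)
                return 0;

        /* Copy with the lock dropped; in the kernel this is the step
         * that may fault and so must not run under the spinlock. */
        if (m->len > space) {
                /* Put the message back at the head so the next read
                 * returns it first. */
                pthread_mutex_lock(&queue_lock);
                m->next = queue_head;
                queue_head = m;
                pthread_mutex_unlock(&queue_lock);
                return 0;
        }

        memcpy(dst, m->payload, m->len);
        return m->len;
}

As the commit message concedes, the put-back alone does not make ordering watertight: with the lock dropped, a second concurrent reader may already have claimed a later event while the first reader was copying. The patch accepts that trade-off in exchange for never calling copy_to_user() with dev->event_lock held.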