author		Chris Wilson <chris@chris-wilson.co.uk>	2010-10-14 10:26:45 -0400
committer	Chris Wilson <chris@chris-wilson.co.uk>	2010-10-19 04:19:55 -0400
commit		4f27b75d56334f33cbccff5da8372dc4aba122ba (patch)
tree		9b22fab1e6cf69085a03c43a5c541db1c1658ca7 /drivers/gpu
parent		fbd5a26d500c7cd8943cc5f37ccc7e49cf386053 (diff)
drm/i915: rearrange mutex acquisition for pread
... to avoid the double acquisition along fast[er] paths.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
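The rearranged flow is easiest to see outside the driver. Below is a small userspace sketch, not the i915 code itself, of the shape i915_gem_pread_ioctl() takes after this patch: struct_mutex is taken once in the ioctl, the fast path is tried first, and an -EFAULT from it triggers the slow fallback under the same lock. The pthread mutex and all function names here are illustrative stand-ins.

/*
 * Standalone userspace sketch (not the i915 code) of the control flow
 * this patch gives the pread ioctl: lock once, try the fast copy, and
 * fall back to the slow path only when the fast path reports -EFAULT.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-ins for the fast and slow read paths. */
static int pread_fast(int force_fault)
{
	return force_fault ? -EFAULT : 0;	/* -EFAULT means "retry slowly" */
}

static int pread_slow(void)
{
	return 0;				/* slow path always succeeds here */
}

static int pread_ioctl(int needs_swizzle, int force_fault)
{
	int ret;

	/* Single lock acquisition covering both paths. */
	ret = pthread_mutex_lock(&struct_mutex);
	if (ret)
		return -ret;

	/* (object lookup, bounds checks and page pinning would happen here) */

	ret = -EFAULT;
	if (!needs_swizzle)
		ret = pread_fast(force_fault);
	if (ret == -EFAULT)
		ret = pread_slow();

	pthread_mutex_unlock(&struct_mutex);
	return ret;
}

int main(void)
{
	printf("fast path: %d\n", pread_ioctl(0, 0));
	printf("fallback:  %d\n", pread_ioctl(0, 1));
	printf("swizzled:  %d\n", pread_ioctl(1, 0));
	return 0;
}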
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	99
1 file changed, 43 insertions(+), 56 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1177ff57791..efc6a4e3b1d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -265,14 +265,14 @@ fast_shmem_read(struct page **pages,
 		 char __user *data,
 		 int length)
 {
-	int unwritten;
 	char *vaddr;
+	int ret;
 
 	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
-	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
+	ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
 	kunmap_atomic(vaddr, KM_USER0);
 
-	return unwritten ? -EFAULT : 0;
+	return ret;
 }
 
 static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
@@ -366,24 +366,10 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
 	loff_t offset, page_base;
 	char __user *user_data;
 	int page_offset, page_length;
-	int ret;
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
-
-	ret = i915_gem_object_get_pages(obj, 0);
-	if (ret != 0)
-		goto fail_unlock;
-
-	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
-							args->size);
-	if (ret != 0)
-		goto fail_put_pages;
-
 	obj_priv = to_intel_bo(obj);
 	offset = args->offset;
 
@@ -400,23 +386,17 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
 		if ((page_offset + remain) > PAGE_SIZE)
 			page_length = PAGE_SIZE - page_offset;
 
-		ret = fast_shmem_read(obj_priv->pages,
-				      page_base, page_offset,
-				      user_data, page_length);
-		if (ret)
-			goto fail_put_pages;
+		if (fast_shmem_read(obj_priv->pages,
+				    page_base, page_offset,
+				    user_data, page_length))
+			return -EFAULT;
 
 		remain -= page_length;
 		user_data += page_length;
 		offset += page_length;
 	}
 
-fail_put_pages:
-	i915_gem_object_put_pages(obj);
-fail_unlock:
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
+	return 0;
 }
 
 static int
@@ -477,33 +457,28 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
 	num_pages = last_data_page - first_data_page + 1;
 
-	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
+	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
 	if (user_pages == NULL)
 		return -ENOMEM;
 
+	mutex_unlock(&dev->struct_mutex);
 	down_read(&mm->mmap_sem);
 	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
 				      num_pages, 1, 0, user_pages, NULL);
 	up_read(&mm->mmap_sem);
+	mutex_lock(&dev->struct_mutex);
 	if (pinned_pages < num_pages) {
 		ret = -EFAULT;
-		goto fail_put_user_pages;
+		goto out;
 	}
 
-	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
-
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		goto fail_put_user_pages;
-
-	ret = i915_gem_object_get_pages_or_evict(obj);
+	ret = i915_gem_object_set_cpu_read_domain_range(obj,
+							args->offset,
+							args->size);
 	if (ret)
-		goto fail_unlock;
+		goto out;
 
-	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
-							args->size);
-	if (ret != 0)
-		goto fail_put_pages;
+	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
 	obj_priv = to_intel_bo(obj);
 	offset = args->offset;
@@ -548,11 +523,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 		offset += page_length;
 	}
 
-fail_put_pages:
-	i915_gem_object_put_pages(obj);
-fail_unlock:
-	mutex_unlock(&dev->struct_mutex);
-fail_put_user_pages:
+out:
 	for (i = 0; i < pinned_pages; i++) {
 		SetPageDirty(user_pages[i]);
 		page_cache_release(user_pages[i]);
@@ -581,6 +552,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	obj_priv = to_intel_bo(obj);
 
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
+
 	/* Bounds check source. */
 	if (args->offset > obj->size || args->size > obj->size - args->offset) {
 		ret = -EINVAL;
@@ -604,17 +581,27 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
-	if (i915_gem_object_needs_bit17_swizzle(obj)) {
-		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
-	} else {
+	ret = i915_gem_object_get_pages_or_evict(obj);
+	if (ret)
+		goto out;
+
+	ret = i915_gem_object_set_cpu_read_domain_range(obj,
+							args->offset,
+							args->size);
+	if (ret)
+		goto out_put;
+
+	ret = -EFAULT;
+	if (!i915_gem_object_needs_bit17_swizzle(obj))
 		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
-		if (ret != 0)
-			ret = i915_gem_shmem_pread_slow(dev, obj, args,
-							file_priv);
-	}
+	if (ret == -EFAULT)
+		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
 
+out_put:
+	i915_gem_object_put_pages(obj);
 out:
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
 
@@ -908,7 +895,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
 	num_pages = last_data_page - first_data_page + 1;
 
-	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
+	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
 	if (user_pages == NULL)
 		return -ENOMEM;
 
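One detail from the pread_slow hunks above is worth spelling out: the slow path now enters with struct_mutex already held (it is taken once in the ioctl) and temporarily drops it around get_user_pages(), unlocking before down_read(&mm->mmap_sem) and relocking after up_read(), so pinning the user buffer still happens without the GEM mutex held. A minimal userspace sketch of that drop-the-lock-around-blocking-work shape, with purely illustrative names:

/*
 * Userspace sketch (illustrative names only) of the slow-path shape:
 * enter with the device mutex held, drop it around the blocking
 * user-page pin, then retake it to finish the copy.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for get_user_pages(): may block/fault, must not hold the mutex. */
static int pin_user_pages(int num_pages)
{
	return num_pages;	/* pretend every page was pinned */
}

static int pread_slow(int num_pages)
{
	int pinned, ret = 0;

	/* Called with struct_mutex held, mirroring the new calling convention. */
	pthread_mutex_unlock(&struct_mutex);
	pinned = pin_user_pages(num_pages);
	pthread_mutex_lock(&struct_mutex);

	if (pinned < num_pages)
		ret = -EFAULT;

	/* ... copy out via the shmem pages, release the pinned pages ... */
	return ret;	/* still holding struct_mutex; the caller unlocks */
}

int main(void)
{
	pthread_mutex_lock(&struct_mutex);
	printf("slow pread: %d\n", pread_slow(4));
	pthread_mutex_unlock(&struct_mutex);
	return 0;
}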