author		Chris Wilson <chris@chris-wilson.co.uk>	2012-09-04 16:02:56 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2012-09-20 08:22:57 -0400
commit		f60d7f0c1d55a935475ab394955cafddefaa6533 (patch)
tree		016122c1b20e51d32ddc2916c84a7a4a719e58bc
parent		755d22184f1e5015b040acee794542d9cf8a16c5 (diff)
drm/i915: Pin backing pages for pread
By using the recently introduced pinning of pages, we can safely drop the
mutex in the knowledge that the pages are not going to disappear beneath us,
and so we can simplify the code for iterating over the pages.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	36
1 file changed, 13 insertions(+), 23 deletions(-)
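The change is easier to follow against a sketch of the locking pattern the commit message describes: pin the object's backing pages once, drop struct_mutex around the faulting copy, and unpin at the end, instead of taking a page-cache reference on each page before every unlock. The fragment below is a minimal, self-contained userspace analogue under assumed names (struct mock_object, object_get_pages(), object_pin_pages(), object_unpin_pages(), mock_pread()); it illustrates the idea and is not the driver's actual API.

/*
 * Minimal userspace sketch of the pattern this patch adopts.  The mock_*
 * and object_* names are hypothetical stand-ins for the i915_gem_object_*
 * helpers, and struct_mutex stands in for dev->struct_mutex.
 */
#include <errno.h>
#include <pthread.h>
#include <stddef.h>
#include <string.h>

#define MOCK_PAGE_SIZE 4096
#define MOCK_NPAGES    4

struct mock_object {
	char *pages[MOCK_NPAGES];	/* backing "pages" */
	int pages_pin_count;		/* pages must not be released while > 0 */
};

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Ensure the backing pages exist; called under struct_mutex. */
static int object_get_pages(struct mock_object *obj)
{
	return obj->pages[0] ? 0 : -ENOMEM;
}

static void object_pin_pages(struct mock_object *obj)
{
	obj->pages_pin_count++;
}

static void object_unpin_pages(struct mock_object *obj)
{
	obj->pages_pin_count--;
}

/*
 * Copy @len bytes starting at @offset into @dst.  Pinning the whole
 * backing store up front means the mutex can be dropped around the
 * copy without a per-page get/release dance.
 */
int mock_pread(struct mock_object *obj, char *dst, size_t offset, size_t len)
{
	int ret;

	pthread_mutex_lock(&struct_mutex);

	ret = object_get_pages(obj);
	if (ret)
		goto unlock;

	object_pin_pages(obj);

	while (len > 0) {
		size_t page_off = offset & (MOCK_PAGE_SIZE - 1);
		size_t chunk = MOCK_PAGE_SIZE - page_off;
		char *page = obj->pages[offset / MOCK_PAGE_SIZE];

		if (chunk > len)
			chunk = len;

		/* "Slow path": drop the lock; the pin keeps @page alive. */
		pthread_mutex_unlock(&struct_mutex);
		memcpy(dst, page + page_off, chunk);
		pthread_mutex_lock(&struct_mutex);

		dst += chunk;
		offset += chunk;
		len -= chunk;
	}

	object_unpin_pages(obj);
unlock:
	pthread_mutex_unlock(&struct_mutex);
	return ret;
}

The payoff visible in the diff below is the same: once the pin guarantees the pages outlive the unlocked copy, the release_page bookkeeping, the per-page page_cache_get()/page_cache_release() pair, and the shmem_read_mapping_page() fallback all become unnecessary.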
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 73702e583eb9..26c8bf9c5fa6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -343,7 +343,7 @@ shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
 				      page_length);
 	kunmap_atomic(vaddr);
 
-	return ret;
+	return ret ? -EFAULT : 0;
 }
 
 static void
@@ -394,7 +394,7 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
 				     page_length);
 	kunmap(page);
 
-	return ret;
+	return ret ? - EFAULT : 0;
 }
 
 static int
@@ -403,7 +403,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		     struct drm_i915_gem_pread *args,
 		     struct drm_file *file)
 {
-	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	char __user *user_data;
 	ssize_t remain;
 	loff_t offset;
@@ -412,7 +411,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
 	int hit_slowpath = 0;
 	int prefaulted = 0;
 	int needs_clflush = 0;
-	int release_page;
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
@@ -433,6 +431,12 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		}
 	}
 
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
+	i915_gem_object_pin_pages(obj);
+
 	offset = args->offset;
 
 	while (remain > 0) {
@@ -448,18 +452,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		if ((shmem_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - shmem_page_offset;
 
-		if (obj->pages) {
-			page = obj->pages[offset >> PAGE_SHIFT];
-			release_page = 0;
-		} else {
-			page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
-			if (IS_ERR(page)) {
-				ret = PTR_ERR(page);
-				goto out;
-			}
-			release_page = 1;
-		}
-
+		page = obj->pages[offset >> PAGE_SHIFT];
 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
 			(page_to_phys(page) & (1 << 17)) != 0;
 
@@ -470,7 +463,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
 			goto next_page;
 
 		hit_slowpath = 1;
-		page_cache_get(page);
 		mutex_unlock(&dev->struct_mutex);
 
 		if (!prefaulted) {
@@ -488,16 +480,12 @@ i915_gem_shmem_pread(struct drm_device *dev,
 				       needs_clflush);
 
 		mutex_lock(&dev->struct_mutex);
-		page_cache_release(page);
+
 next_page:
 		mark_page_accessed(page);
-		if (release_page)
-			page_cache_release(page);
 
-		if (ret) {
-			ret = -EFAULT;
+		if (ret)
 			goto out;
-		}
 
 		remain -= page_length;
 		user_data += page_length;
@@ -505,6 +493,8 @@ next_page:
 	}
 
 out:
+	i915_gem_object_unpin_pages(obj);
+
 	if (hit_slowpath) {
 		/* Fixup: Kill any reinstated backing storage pages */
 		if (obj->madv == __I915_MADV_PURGED)